// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * mon_main.c: Main file, module initiation and exit, registrations, etc.
 *
 * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/mutex.h>

#include "usb_mon.h"

static void mon_stop(struct mon_bus *mbus);
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
static void mon_bus_drop(struct kref *r);
static void mon_bus_init(struct usb_bus *ubus);

DEFINE_MUTEX(mon_lock);

struct mon_bus mon_bus0;	/* Pseudo bus meaning "all buses" */
static LIST_HEAD(mon_buses);	/* All buses we know: struct mon_bus */

/*
 * Link a reader into the bus.
 *
 * This must be called with mon_lock taken because of mbus->ref.
 */
void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r)
{
	unsigned long flags;
	struct list_head *p;

	spin_lock_irqsave(&mbus->lock, flags);
	if (mbus->nreaders == 0) {
		if (mbus == &mon_bus0) {
			list_for_each (p, &mon_buses) {
				struct mon_bus *m1;
				m1 = list_entry(p, struct mon_bus, bus_link);
				m1->u_bus->monitored = 1;
			}
		} else {
			mbus->u_bus->monitored = 1;
		}
	}
	mbus->nreaders++;
	list_add_tail(&r->r_link, &mbus->r_list);
	spin_unlock_irqrestore(&mbus->lock, flags);

	kref_get(&mbus->ref);
}

/*
 * Unlink reader from the bus.
 *
 * This is called with mon_lock taken, so we can decrement mbus->ref.
 */
void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r)
{
	unsigned long flags;

	spin_lock_irqsave(&mbus->lock, flags);
	list_del(&r->r_link);
	--mbus->nreaders;
	if (mbus->nreaders == 0)
		mon_stop(mbus);
	spin_unlock_irqrestore(&mbus->lock, flags);

	kref_put(&mbus->ref, mon_bus_drop);
}

/*
 */
static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb)
{
	unsigned long flags;
	struct mon_reader *r;

	spin_lock_irqsave(&mbus->lock, flags);
	mbus->cnt_events++;
	list_for_each_entry(r, &mbus->r_list, r_link)
		r->rnf_submit(r->r_data, urb);
	spin_unlock_irqrestore(&mbus->lock, flags);
}

static void mon_submit(struct usb_bus *ubus, struct urb *urb)
{
	struct mon_bus *mbus;

	mbus = ubus->mon_bus;
	if (mbus != NULL)
		mon_bus_submit(mbus, urb);
	mon_bus_submit(&mon_bus0, urb);
}

/*
 */
static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error)
{
	unsigned long flags;
	struct mon_reader *r;

	spin_lock_irqsave(&mbus->lock, flags);
	mbus->cnt_events++;
	list_for_each_entry(r, &mbus->r_list, r_link)
		r->rnf_error(r->r_data, urb, error);
	spin_unlock_irqrestore(&mbus->lock, flags);
}

static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
{
	struct mon_bus *mbus;

	mbus = ubus->mon_bus;
	if (mbus != NULL)
		mon_bus_submit_error(mbus, urb, error);
	mon_bus_submit_error(&mon_bus0, urb, error);
}

/*
 */
static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb, int status)
{
	unsigned long flags;
	struct mon_reader *r;

	spin_lock_irqsave(&mbus->lock, flags);
	mbus->cnt_events++;
	list_for_each_entry(r, &mbus->r_list, r_link)
		r->rnf_complete(r->r_data, urb, status);
	spin_unlock_irqrestore(&mbus->lock, flags);
}

static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status)
{
	struct mon_bus *mbus;

	mbus = ubus->mon_bus;
	if (mbus != NULL)
		mon_bus_complete(mbus, urb, status);
	mon_bus_complete(&mon_bus0, urb, status);
}

/* int (*unlink_urb) (struct urb *urb, int status); */

/*
 * Stop monitoring.
 */
static void mon_stop(struct mon_bus *mbus)
{
	struct usb_bus *ubus;

	if (mbus == &mon_bus0) {
		list_for_each_entry(mbus, &mon_buses, bus_link) {
			/*
			 * We do not change nreaders here, so rely on mon_lock.
			 */
			if (mbus->nreaders == 0 && (ubus = mbus->u_bus) != NULL)
				ubus->monitored = 0;
		}
	} else {
		/*
		 * A stop can be called for a dissolved mon_bus in case of
		 * a reader staying across an rmmod foo_hcd, so test ->u_bus.
		 */
		if (mon_bus0.nreaders == 0 && (ubus = mbus->u_bus) != NULL) {
			ubus->monitored = 0;
			mb();
		}
	}
}

/*
 * Add a USB bus (usually by a modprobe foo-hcd)
 *
 * This does not return an error code because the core cannot care less
 * if monitoring is not established.
 */
static void mon_bus_add(struct usb_bus *ubus)
{
	mon_bus_init(ubus);
	mutex_lock(&mon_lock);
	if (mon_bus0.nreaders != 0)
		ubus->monitored = 1;
	mutex_unlock(&mon_lock);
}

/*
 * Remove a USB bus (either from rmmod foo-hcd or from a hot-remove event).
 */
static void mon_bus_remove(struct usb_bus *ubus)
{
	struct mon_bus *mbus = ubus->mon_bus;

	mutex_lock(&mon_lock);
	list_del(&mbus->bus_link);
	if (mbus->text_inited)
		mon_text_del(mbus);
	if (mbus->bin_inited)
		mon_bin_del(mbus);

	mon_dissolve(mbus, ubus);
	kref_put(&mbus->ref, mon_bus_drop);
	mutex_unlock(&mon_lock);
}

static int mon_notify(struct notifier_block *self, unsigned long action,
		      void *dev)
{
	switch (action) {
	case USB_BUS_ADD:
		mon_bus_add(dev);
		break;
	case USB_BUS_REMOVE:
		mon_bus_remove(dev);
	}
	return NOTIFY_OK;
}

static struct notifier_block mon_nb = {
	.notifier_call = mon_notify,
};

/*
 * Ops
 */
static const struct usb_mon_operations mon_ops_0 = {
	.urb_submit = mon_submit,
	.urb_submit_error = mon_submit_error,
	.urb_complete = mon_complete,
};

/*
 * Tear usb_bus and mon_bus apart.
 */
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus)
{
	if (ubus->monitored) {
		ubus->monitored = 0;
		mb();
	}

	ubus->mon_bus = NULL;
	mbus->u_bus = NULL;
	mb();

	/* We want synchronize_irq() here, but that needs an argument. */
}

/*
 */
static void mon_bus_drop(struct kref *r)
{
	struct mon_bus *mbus = container_of(r, struct mon_bus, ref);
	kfree(mbus);
}

/*
 * Initialize a bus for us:
 *  - allocate mon_bus
 *  - refcount USB bus struct
 *  - link
 */
static void mon_bus_init(struct usb_bus *ubus)
{
	struct mon_bus *mbus;

	mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL);
	if (mbus == NULL)
		goto err_alloc;
	kref_init(&mbus->ref);
	spin_lock_init(&mbus->lock);
	INIT_LIST_HEAD(&mbus->r_list);

	/*
	 * We don't need to take a reference to ubus, because we receive
	 * a notification if the bus is about to be removed.
	 */
	mbus->u_bus = ubus;
	ubus->mon_bus = mbus;

	mbus->text_inited = mon_text_add(mbus, ubus);
	mbus->bin_inited = mon_bin_add(mbus, ubus);

	mutex_lock(&mon_lock);
	list_add_tail(&mbus->bus_link, &mon_buses);
	mutex_unlock(&mon_lock);
	return;

err_alloc:
	return;
}

static void mon_bus0_init(void)
{
	struct mon_bus *mbus = &mon_bus0;

	kref_init(&mbus->ref);
	spin_lock_init(&mbus->lock);
	INIT_LIST_HEAD(&mbus->r_list);

	mbus->text_inited = mon_text_add(mbus, NULL);
	mbus->bin_inited = mon_bin_add(mbus, NULL);
}

/*
 * Search a USB bus by number. Notice that USB bus numbers start from one,
 * which we may later use to identify "all" with zero.
 *
 * This function must be called with mon_lock held.
 *
 * This is obviously inefficient and may be revised in the future.
 */
struct mon_bus *mon_bus_lookup(unsigned int num)
{
	struct mon_bus *mbus;

	if (num == 0) {
		return &mon_bus0;
	}
	list_for_each_entry(mbus, &mon_buses, bus_link) {
		if (mbus->u_bus->busnum == num) {
			return mbus;
		}
	}
	return NULL;
}

static int __init mon_init(void)
{
	struct usb_bus *ubus;
	int rc, id;

	if ((rc = mon_text_init()) != 0)
		goto err_text;
	if ((rc = mon_bin_init()) != 0)
		goto err_bin;

	mon_bus0_init();

	if (usb_mon_register(&mon_ops_0) != 0) {
		printk(KERN_NOTICE TAG ": unable to register with the core\n");
		rc = -ENODEV;
		goto err_reg;
	}
	// MOD_INC_USE_COUNT(which_module?);

	mutex_lock(&usb_bus_idr_lock);
	idr_for_each_entry(&usb_bus_idr, ubus, id)
		mon_bus_init(ubus);
	usb_register_notify(&mon_nb);
	mutex_unlock(&usb_bus_idr_lock);
	return 0;

err_reg:
	mon_bin_exit();
err_bin:
	mon_text_exit();
err_text:
	return rc;
}

static void __exit mon_exit(void)
{
	struct mon_bus *mbus;
	struct list_head *p;

	usb_unregister_notify(&mon_nb);
	usb_mon_deregister();

	mutex_lock(&mon_lock);

	while (!list_empty(&mon_buses)) {
		p = mon_buses.next;
		mbus = list_entry(p, struct mon_bus, bus_link);
		list_del(p);

		if (mbus->text_inited)
			mon_text_del(mbus);
		if (mbus->bin_inited)
			mon_bin_del(mbus);

		/*
		 * This never happens, because the open/close paths in
		 * file level maintain module use counters and so rmmod fails
		 * before reaching here. However, better be safe...
		 */
		if (mbus->nreaders) {
			printk(KERN_ERR TAG
			    ": Outstanding opens (%d) on usb%d, leaking...\n",
			    mbus->nreaders, mbus->u_bus->busnum);
			kref_get(&mbus->ref); /* Force leak */
		}

		mon_dissolve(mbus, mbus->u_bus);
		kref_put(&mbus->ref, mon_bus_drop);
	}

	mbus = &mon_bus0;
	if (mbus->text_inited)
		mon_text_del(mbus);
	if (mbus->bin_inited)
		mon_bin_del(mbus);

	mutex_unlock(&mon_lock);

	mon_text_exit();
	mon_bin_exit();
}

module_init(mon_init);
module_exit(mon_exit);
MODULE_DESCRIPTION("USB Monitor");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 */

#ifndef __MT76x02_H
#define __MT76x02_H

#include <linux/kfifo.h>

#include "mt76.h"
#include "mt76x02_regs.h"
#include "mt76x02_mac.h"
#include "mt76x02_dfs.h"
#include "mt76x02_dma.h"

#define MT76x02_TX_RING_SIZE	512
#define MT76x02_PSD_RING_SIZE	128
#define MT76x02_N_WCIDS		128
#define MT_CALIBRATE_INTERVAL	HZ
#define MT_MAC_WORK_INTERVAL	(HZ / 10)

#define MT_WATCHDOG_TIME	(HZ / 10)
#define MT_TX_HANG_TH		10

#define MT_MAX_CHAINS		2

struct mt76x02_rx_freq_cal {
	s8 high_gain[MT_MAX_CHAINS];
	s8 rssi_offset[MT_MAX_CHAINS];
	s8 lna_gain;
	u32 mcu_gain;
	s16 temp_offset;
	u8 freq_offset;
};

struct mt76x02_calibration {
	struct mt76x02_rx_freq_cal rx;

	u8 agc_gain_init[MT_MAX_CHAINS];
	u8 agc_gain_cur[MT_MAX_CHAINS];

	u16 false_cca;
	s8 avg_rssi_all;
	s8 agc_gain_adjust;
	s8 agc_lowest_gain;
	s8 low_gain;

	s8 temp_vco;
	s8 temp;

	bool init_cal_done;
	bool tssi_cal_done;
	bool tssi_comp_pending;
	bool dpd_cal_done;
	bool channel_cal_done;
	bool gain_init_done;

	int tssi_target;
	s8 tssi_dc;
};

struct mt76x02_beacon_ops {
	unsigned int nslots;
	unsigned int slot_size;
	void (*pre_tbtt_enable)(struct mt76x02_dev *dev, bool en);
	void (*beacon_enable)(struct mt76x02_dev *dev, bool en);
};

#define mt76x02_beacon_enable(dev, enable)	\
	(dev)->beacon_ops->beacon_enable(dev, enable)
#define mt76x02_pre_tbtt_enable(dev, enable)	\
	(dev)->beacon_ops->pre_tbtt_enable(dev, enable)

struct mt76x02_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[2];
		};
		s8 all[30];
	};
};

struct mt76x02_dev {
	union { /* must be first */
		struct mt76_dev mt76;
		struct mt76_phy mphy;
	};

	struct mac_address macaddr_list[8];

	struct mutex phy_mutex;

	u8 txdone_seq;
	DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
	spinlock_t txstatus_fifo_lock;
	u32 tx_airtime;
	u32 ampdu_ref;

	struct sk_buff *rx_head;

	struct delayed_work cal_work;
	struct delayed_work wdt_work;

	struct hrtimer pre_tbtt_timer;
	struct work_struct pre_tbtt_work;

	const struct mt76x02_beacon_ops *beacon_ops;

	u8 beacon_data_count;

	u8 tbtt_count;

	u32 tx_hang_reset;
	u8 tx_hang_check[4];
	u8 beacon_hang_check;
	u8 mcu_timeout;

	struct mt76x02_rate_power rate_power;

	struct mt76x02_calibration cal;

	int txpower_conf;
	s8 target_power;
	s8 target_power_delta[2];
	bool enable_tpc;

	bool no_2ghz;

	s16 coverage_class;
	u8 slottime;

	struct mt76x02_dfs_pattern_detector dfs_pd;

	/* edcca monitor */
	unsigned long ed_trigger_timeout;
	bool ed_tx_blocked;
	bool ed_monitor;
	u8 ed_monitor_enabled;
	u8 ed_monitor_learning;
	u8 ed_trigger;
	u8 ed_silent;
	ktime_t ed_time;
};

extern struct ieee80211_rate mt76x02_rates[12];

int mt76x02_init_device(struct mt76x02_dev *dev);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
			      unsigned int changed_flags,
			      unsigned int *total_flags, u64 multicast);
int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta);
void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta);

void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);

int mt76x02_add_interface(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif);

int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_ampdu_params *params);
int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		    struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		    struct ieee80211_key_conf *key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    unsigned int link_id, u16 queue,
		    const struct ieee80211_tx_queue_params *params);
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta);
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
				const struct ieee80211_tx_rate *rate);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
			    s8 max_txpwr_adj);
void mt76x02_wdt_work(struct work_struct *work);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
				s16 coverage_class);
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb, u32 *info);
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif);
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *info, u64 changed);
void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
			       enum ieee80211_reconfig_type reconfig_type);

struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};

void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
				 struct beacon_bc_data *data,
				 int max_nframes);

void mt76x02_mac_start(struct mt76x02_dev *dev);

void mt76x02_init_debugfs(struct mt76x02_dev *dev);

static inline bool is_mt76x0(struct mt76x02_dev *dev)
{
	return mt76_chip(&dev->mt76) == 0x7610 ||
	       mt76_chip(&dev->mt76) == 0x7630 ||
	       mt76_chip(&dev->mt76) == 0x7650;
}

static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
	return mt76_chip(&dev->mt76) == 0x7612 ||
	       mt76_chip(&dev->mt76) == 0x7632 ||
	       mt76_chip(&dev->mt76) == 0x7662 ||
	       mt76_chip(&dev->mt76) == 0x7602;
}

static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}

static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
{
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}

static inline bool
mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
{
	return __mt76_poll_msec(dev, MT_MAC_STATUS,
				MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100);
}

static inline struct mt76x02_sta *
mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
{
	struct mt76_wcid *wcid;

	if (idx >= MT76x02_N_WCIDS)
		return NULL;

	wcid = rcu_dereference(dev->wcid[idx]);
	if (!wcid)
		return NULL;

	return container_of(wcid, struct mt76x02_sta, wcid);
}

static inline struct mt76_wcid *
mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
{
	if (!sta)
		return NULL;

	if (unicast)
		return &sta->wcid;
	else
		return &sta->vif->group_wcid;
}

#endif /* __MT76x02_H */
// SPDX-License-Identifier: GPL-2.0-or-later
//
// Special handling for implicit feedback mode
//

#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "usbaudio.h"
#include "card.h"
#include "helper.h"
#include "pcm.h"
#include "implicit.h"

enum {
	IMPLICIT_FB_NONE,
	IMPLICIT_FB_GENERIC,
	IMPLICIT_FB_FIXED,
	IMPLICIT_FB_BOTH,	/* generic playback + capture (for BOSS) */
};

struct snd_usb_implicit_fb_match {
	unsigned int id;
	unsigned int iface_class;
	unsigned int ep_num;
	unsigned int iface;
	int type;
};

#define IMPLICIT_FB_GENERIC_DEV(vend, prod) \
	{ .id = USB_ID(vend, prod), .type = IMPLICIT_FB_GENERIC }
#define IMPLICIT_FB_FIXED_DEV(vend, prod, ep, ifnum) \
	{ .id = USB_ID(vend, prod), .type = IMPLICIT_FB_FIXED, .ep_num = (ep),\
	    .iface = (ifnum) }
#define IMPLICIT_FB_BOTH_DEV(vend, prod, ep, ifnum) \
	{ .id = USB_ID(vend, prod), .type = IMPLICIT_FB_BOTH, .ep_num = (ep),\
	    .iface = (ifnum) }
#define IMPLICIT_FB_SKIP_DEV(vend, prod) \
	{ .id = USB_ID(vend, prod), .type = IMPLICIT_FB_NONE }

/* Implicit feedback quirk table for playback */
static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
	/* Fixed EP */
	/* FIXME: check the availability of generic matching */
	IMPLICIT_FB_FIXED_DEV(0x0763, 0x2030, 0x81, 3), /* M-Audio Fast Track C400 */
	IMPLICIT_FB_FIXED_DEV(0x0763, 0x2031, 0x81, 3), /* M-Audio Fast Track C600 */
	IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
	IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
	IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
	IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */
	IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
	IMPLICIT_FB_FIXED_DEV(0x0499, 0x172f, 0x81, 2), /* Steinberg UR22C */
	IMPLICIT_FB_FIXED_DEV(0x0d9a, 0x00df, 0x81, 2), /* RTX6001 */
	IMPLICIT_FB_FIXED_DEV(0x19f7, 0x000a, 0x84, 3), /* RODE AI-1 */
	IMPLICIT_FB_FIXED_DEV(0x22f0, 0x0006, 0x81, 3), /* Allen&Heath Qu-16 */
	IMPLICIT_FB_FIXED_DEV(0x1686, 0xf029, 0x82, 2), /* Zoom UAC-2 */
	IMPLICIT_FB_FIXED_DEV(0x2466, 0x8003, 0x86, 2), /* Fractal Audio Axe-Fx II */
	IMPLICIT_FB_FIXED_DEV(0x0499, 0x172a, 0x86, 2), /* Yamaha MODX */

	/* Special matching */
	{ .id = USB_ID(0x07fd, 0x0004), .iface_class = USB_CLASS_AUDIO,
	  .type = IMPLICIT_FB_NONE },		/* MicroBook IIc */
	/* ep = 0x84, ifnum = 0 */
	{ .id = USB_ID(0x07fd, 0x0004), .iface_class = USB_CLASS_VENDOR_SPEC,
	  .type = IMPLICIT_FB_FIXED,
	  .ep_num = 0x84, .iface = 0 },		/* MOTU MicroBook II */

	{} /* terminator */
};

/* Implicit feedback quirk table for capture: only FIXED type */
static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
	{} /* terminator */
};

/* set up sync EP information on the audioformat */
static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
				   struct audioformat *fmt,
				   int ep, int ep_idx,
				   int ifnum,
				   const struct usb_host_interface *alts)
{
	struct usb_interface *iface;

	if (!alts) {
		iface = usb_ifnum_to_if(chip->dev, ifnum);
		if (!iface || iface->num_altsetting < 2)
			return 0;
		alts = &iface->altsetting[1];
	}

	fmt->sync_ep = ep;
	fmt->sync_iface = ifnum;
	fmt->sync_altsetting = alts->desc.bAlternateSetting;
	fmt->sync_ep_idx = ep_idx;
	fmt->implicit_fb = 1;
	usb_audio_dbg(chip,
		      "%d:%d: added %s implicit_fb sync_ep %x, iface %d:%d\n",
		      fmt->iface, fmt->altsetting,
		      (ep & USB_DIR_IN) ? "playback" : "capture",
		      fmt->sync_ep, fmt->sync_iface, fmt->sync_altsetting);
	return 1;
}

/* Check whether the given UAC2 iface:altset points to an implicit fb source */
static int add_generic_uac2_implicit_fb(struct snd_usb_audio *chip,
					struct audioformat *fmt,
					unsigned int ifnum,
					unsigned int altsetting)
{
	struct usb_host_interface *alts;
	struct usb_endpoint_descriptor *epd;

	alts = snd_usb_get_host_interface(chip, ifnum, altsetting);
	if (!alts)
		return 0;

	if (alts->desc.bInterfaceClass != USB_CLASS_AUDIO ||
	    alts->desc.bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING ||
	    alts->desc.bInterfaceProtocol != UAC_VERSION_2 ||
	    alts->desc.bNumEndpoints < 1)
		return 0;
	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_in(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
					USB_ENDPOINT_USAGE_IMPLICIT_FB)
		return 0;
	return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
				       ifnum, alts);
}

static bool roland_sanity_check_iface(struct usb_host_interface *alts)
{
	if (alts->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
	    (alts->desc.bInterfaceSubClass != 2 &&
	     alts->desc.bInterfaceProtocol != 2) ||
	    alts->desc.bNumEndpoints < 1)
		return false;
	return true;
}

/* Like the UAC2 case above, but specific to Roland with vendor class and hack */
static int add_roland_implicit_fb(struct snd_usb_audio *chip,
				  struct audioformat *fmt,
				  struct usb_host_interface *alts)
{
	struct usb_endpoint_descriptor *epd;

	if (!roland_sanity_check_iface(alts))
		return 0;
	/* only when both streams are with ASYNC type */
	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_out(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
		return 0;

	/* check capture EP */
	alts = snd_usb_get_host_interface(chip,
					  alts->desc.bInterfaceNumber + 1,
					  alts->desc.bAlternateSetting);
	if (!alts || !roland_sanity_check_iface(alts))
		return 0;
	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_in(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
		return 0;
	chip->quirk_flags |= QUIRK_FLAG_PLAYBACK_FIRST;
	return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
				       alts->desc.bInterfaceNumber, alts);
}

/* capture quirk for Roland device; always full-duplex */
static int add_roland_capture_quirk(struct snd_usb_audio *chip,
				    struct audioformat *fmt,
				    struct usb_host_interface *alts)
{
	struct usb_endpoint_descriptor *epd;

	if (!roland_sanity_check_iface(alts))
		return 0;
	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_in(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
		return 0;

	alts = snd_usb_get_host_interface(chip,
					  alts->desc.bInterfaceNumber - 1,
					  alts->desc.bAlternateSetting);
	if (!alts || !roland_sanity_check_iface(alts))
		return 0;
	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_out(epd))
		return 0;
	return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
				       alts->desc.bInterfaceNumber, alts);
}

/* Playback and capture EPs on Pioneer devices share the same iface/altset
 * for the implicit feedback operation
 */
static bool is_pioneer_implicit_fb(struct snd_usb_audio *chip,
				   struct usb_host_interface *alts)
{
	struct usb_endpoint_descriptor *epd;

	if (USB_ID_VENDOR(chip->usb_id) != 0x2b73 &&
	    USB_ID_VENDOR(chip->usb_id) != 0x08e4)
		return false;
	if (alts->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
		return false;
	if (alts->desc.bNumEndpoints != 2)
		return false;

	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_out(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
		return false;

	epd = get_endpoint(alts, 1);
	if (!usb_endpoint_is_isoc_in(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC ||
	    ((epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
	     USB_ENDPOINT_USAGE_DATA &&
	     (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
	     USB_ENDPOINT_USAGE_IMPLICIT_FB))
		return false;

	return true;
}

static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
				     struct audioformat *fmt,
				     int iface, int altset)
{
	struct usb_host_interface *alts;
	struct usb_endpoint_descriptor *epd;

	alts = snd_usb_get_host_interface(chip, iface, altset);
	if (!alts)
		return 0;

	if ((alts->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC &&
	     alts->desc.bInterfaceClass != USB_CLASS_AUDIO) ||
	    alts->desc.bNumEndpoints < 1)
		return 0;
	epd = get_endpoint(alts, 0);
	if (!usb_endpoint_is_isoc_in(epd) ||
	    (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) !=
					USB_ENDPOINT_SYNC_ASYNC)
		return 0;
	return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
				       iface, alts);
}

/* More generic quirk: look for the sync EP next to the data EP */
static int add_generic_implicit_fb(struct snd_usb_audio *chip,
				   struct audioformat *fmt,
				   struct usb_host_interface *alts)
{
	if ((fmt->ep_attr & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
		return 0;

	if (__add_generic_implicit_fb(chip, fmt,
				      alts->desc.bInterfaceNumber + 1,
				      alts->desc.bAlternateSetting))
		return 1;
	return __add_generic_implicit_fb(chip, fmt,
					 alts->desc.bInterfaceNumber - 1,
					 alts->desc.bAlternateSetting);
}

static const struct snd_usb_implicit_fb_match *
find_implicit_fb_entry(struct snd_usb_audio *chip,
		       const struct snd_usb_implicit_fb_match *match,
		       const struct usb_host_interface *alts)
{
	for (; match->id; match++)
		if (match->id == chip->usb_id &&
		    (!match->iface_class ||
		     (alts->desc.bInterfaceClass == match->iface_class)))
			return match;

	return NULL;
}

/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
 * applies. Returns 1 if a quirk was found.
 */
static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
					 struct audioformat *fmt,
					 struct usb_host_interface *alts)
{
	const struct snd_usb_implicit_fb_match *p;
	unsigned int attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;

	p = find_implicit_fb_entry(chip, playback_implicit_fb_quirks, alts);
	if (p) {
		switch (p->type) {
		case IMPLICIT_FB_GENERIC:
			return add_generic_implicit_fb(chip, fmt, alts);
		case IMPLICIT_FB_NONE:
			return 0; /* No quirk */
		case IMPLICIT_FB_FIXED:
			return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
						       p->iface, NULL);
		}
	}

	/* Special handling for devices with capture quirks */
	p = find_implicit_fb_entry(chip, capture_implicit_fb_quirks, alts);
	if (p) {
		switch (p->type) {
		case IMPLICIT_FB_FIXED:
			return 0; /* no quirk */
		case IMPLICIT_FB_BOTH:
			chip->quirk_flags |= QUIRK_FLAG_PLAYBACK_FIRST;
			return add_generic_implicit_fb(chip, fmt, alts);
		}
	}

	/* Generic UAC2 implicit feedback */
	if (attr == USB_ENDPOINT_SYNC_ASYNC &&
	    alts->desc.bInterfaceClass == USB_CLASS_AUDIO &&
	    alts->desc.bInterfaceProtocol == UAC_VERSION_2 &&
	    alts->desc.bNumEndpoints == 1) {
		if (add_generic_uac2_implicit_fb(chip, fmt,
						 alts->desc.bInterfaceNumber + 1,
						 alts->desc.bAlternateSetting))
			return 1;
	}

	/* Roland/BOSS implicit feedback with vendor spec class */
	if (USB_ID_VENDOR(chip->usb_id) == 0x0582) {
		if (add_roland_implicit_fb(chip, fmt, alts) > 0)
			return 1;
	}

	/* Pioneer devices with vendor spec class */
	if (is_pioneer_implicit_fb(chip, alts)) {
		chip->quirk_flags |= QUIRK_FLAG_PLAYBACK_FIRST;
		return add_implicit_fb_sync_ep(chip, fmt,
					       get_endpoint(alts, 1)->bEndpointAddress,
					       1, alts->desc.bInterfaceNumber,
					       alts);
	}

	/* Try the generic implicit fb if available */
	if (chip->generic_implicit_fb ||
	    (chip->quirk_flags & QUIRK_FLAG_GENERIC_IMPLICIT_FB))
		return add_generic_implicit_fb(chip, fmt, alts);

	/* No quirk */
	return 0;
}

/* same for capture, but only handling FIXED entry */
static int audioformat_capture_quirk(struct snd_usb_audio *chip,
				     struct audioformat *fmt,
				     struct usb_host_interface *alts)
{
	const struct snd_usb_implicit_fb_match *p;

	p = find_implicit_fb_entry(chip, capture_implicit_fb_quirks, alts);
	if (p && (p->type == IMPLICIT_FB_FIXED || p->type == IMPLICIT_FB_BOTH))
		return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
					       p->iface, NULL);

	/* Roland/BOSS need full-duplex streams */
	if (USB_ID_VENDOR(chip->usb_id) == 0x0582) {
		if (add_roland_capture_quirk(chip, fmt, alts) > 0)
			return 1;
	}

	if (is_pioneer_implicit_fb(chip, alts))
		return 1; /* skip the quirk, also don't handle generic sync EP */

	return 0;
}

/*
 * Parse altset and set up implicit feedback endpoint on the audioformat
 */
int snd_usb_parse_implicit_fb_quirk(struct snd_usb_audio *chip,
				    struct audioformat *fmt,
				    struct usb_host_interface *alts)
{
	if (chip->quirk_flags & QUIRK_FLAG_SKIP_IMPLICIT_FB)
		return 0;
	if (fmt->endpoint & USB_DIR_IN)
		return audioformat_capture_quirk(chip, fmt, alts);
	else
		return audioformat_implicit_fb_quirk(chip, fmt, alts);
}

/*
 * Return the score of matching two audioformats.
 * Veto the audioformat if:
 * - It has no channels for some reason.
 * - Requested PCM format is not supported.
 * - Requested sample rate is not supported.
 */
static int match_endpoint_audioformats(struct snd_usb_substream *subs,
					const struct audioformat *fp,
					int rate, int channels,
					snd_pcm_format_t pcm_format)
{
	int i, score;

	if (fp->channels < 1)
		return 0;

	if (!(fp->formats & pcm_format_to_bits(pcm_format)))
		return 0;

	if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) {
		if (rate < fp->rate_min || rate > fp->rate_max)
			return 0;
	} else {
		for (i = 0; i < fp->nr_rates; i++) {
			if (fp->rate_table[i] == rate)
				break;
		}
		if (i >= fp->nr_rates)
			return 0;
	}

	score = 1;
	if (fp->channels == channels)
		score++;

	return score;
}

static struct snd_usb_substream *
find_matching_substream(struct snd_usb_audio *chip, int stream, int ep_num,
			int fmt_type)
{
	struct snd_usb_stream *as;
	struct snd_usb_substream *subs;

	list_for_each_entry(as, &chip->pcm_list, list) {
		subs = &as->substream[stream];
		if (as->fmt_type == fmt_type && subs->ep_num == ep_num)
			return subs;
	}

	return NULL;
}

/*
 * Return the audioformat that is suitable for the implicit fb
 */
const struct audioformat *
snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
				     const struct audioformat *target,
				     const struct snd_pcm_hw_params *params,
				     int stream,
				     bool *fixed_rate)
{
	struct snd_usb_substream *subs;
	const struct audioformat *fp, *sync_fmt = NULL;
	int score, high_score;

	/* Use the original audioformat as fallback for the shared altset */
	if (target->iface == target->sync_iface &&
	    target->altsetting == target->sync_altsetting)
		sync_fmt = target;

	subs = find_matching_substream(chip, stream, target->sync_ep,
				       target->fmt_type);
	if (!subs)
		goto end;

	high_score = 0;
	list_for_each_entry(fp, &subs->fmt_list, list) {
		score = match_endpoint_audioformats(subs, fp,
						    params_rate(params),
						    params_channels(params),
						    params_format(params));
		if (score > high_score) {
			sync_fmt = fp;
			high_score = score;
		}
	}

 end:
	if (fixed_rate)
		*fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
	return sync_fmt;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Implementation of the extensible bitmap type.
 *
 * Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
 */
/*
 * Updated: Hewlett-Packard <paul@paul-moore.com>
 *	Added support to import/export the NetLabel category bitmap
 *	(c) Copyright Hewlett-Packard Development Company, L.P., 2006
 *
 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
 *	Applied standard bit operations to improve bitmap scanning.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <net/netlabel.h>
#include "ebitmap.h"
#include "policydb.h"

#define BITS_PER_U64 ((u32)(sizeof(u64) * 8))

static struct kmem_cache *ebitmap_node_cachep __ro_after_init;

bool ebitmap_equal(const struct ebitmap *e1, const struct ebitmap *e2)
{
	const struct ebitmap_node *n1, *n2;

	if (e1->highbit != e2->highbit)
		return false;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit == n2->startbit) &&
	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
		n1 = n1->next;
		n2 = n2->next;
	}

	if (n1 || n2)
		return false;

	return true;
}

int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src)
{
	struct ebitmap_node *new, *prev;
	const struct ebitmap_node *n;

	ebitmap_init(dst);
	n = src->node;
	prev = NULL;
	while (n) {
		new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
		if (!new) {
			ebitmap_destroy(dst);
			return -ENOMEM;
		}
		new->startbit = n->startbit;
		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
		new->next = NULL;
		if (prev)
			prev->next = new;
		else
			dst->node = new;
		prev = new;
		n = n->next;
	}

	dst->highbit = src->highbit;
	return 0;
}

int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1,
		const struct ebitmap *e2)
{
	struct ebitmap_node *n;
	u32 bit;
	int rc;

	ebitmap_init(dst);

	ebitmap_for_each_positive_bit(e1, n, bit) {
		if (ebitmap_get_bit(e2, bit)) {
			rc = ebitmap_set_bit(dst, bit, 1);
			if (rc < 0)
				return rc;
		}
	}
	return 0;
}

#ifdef CONFIG_NETLABEL
/**
 * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
 * @ebmap: the ebitmap to export
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Export a SELinux extensibile bitmap into a NetLabel category bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap **catmap)
{
	struct ebitmap_node *e_iter = ebmap->node;
	unsigned long e_map;
	u32 offset;
	unsigned int iter;
	int rc;

	if (e_iter == NULL) {
		*catmap = NULL;
		return 0;
	}

	if (*catmap != NULL)
		netlbl_catmap_free(*catmap);
	*catmap = NULL;

	while (e_iter) {
		offset = e_iter->startbit;
		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
			e_map = e_iter->maps[iter];
			if (e_map != 0) {
				rc = netlbl_catmap_setlong(catmap, offset,
							   e_map, GFP_ATOMIC);
				if (rc != 0)
					goto netlbl_export_failure;
			}
			offset += EBITMAP_UNIT_SIZE;
		}
		e_iter = e_iter->next;
	}

	return 0;

netlbl_export_failure:
	netlbl_catmap_free(*catmap);
	return -ENOMEM;
}

/**
 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
 * @ebmap: the ebitmap to import
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Import a NetLabel category bitmap into a SELinux extensibile bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap *catmap)
{
	int rc;
	struct ebitmap_node *e_iter = NULL;
	struct ebitmap_node *e_prev = NULL;
	u32 offset = 0, idx;
	unsigned long bitmap;

	for (;;) {
		rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
		if (rc < 0)
			goto netlbl_import_failure;
		if (offset == (u32)-1)
			return 0;

		/* don't waste ebitmap space if the netlabel bitmap is empty */
		if (bitmap == 0) {
			offset += EBITMAP_UNIT_SIZE;
			continue;
		}

		if (e_iter == NULL ||
		    offset >= e_iter->startbit + EBITMAP_SIZE) {
			e_prev = e_iter;
			e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
			if (e_iter == NULL)
				goto netlbl_import_failure;
			e_iter->startbit = offset - (offset % EBITMAP_SIZE);
			if (e_prev == NULL)
				ebmap->node = e_iter;
			else
				e_prev->next = e_iter;
			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
		}

		/* offset will always be aligned to an unsigned long */
		idx = EBITMAP_NODE_INDEX(e_iter, offset);
		e_iter->maps[idx] = bitmap;

		/* next */
		offset += EBITMAP_UNIT_SIZE;
	}

	/* NOTE: we should never reach this return */
	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */

/*
 * Check to see if all the bits set in e2 are also set in e1. Optionally,
 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
 * last_e2bit.
 */
int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2,
		     u32 last_e2bit)
{
	const struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			n1 = n1->next;
			continue;
		}
		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i];)
			i--; /* Skip trailing NULL map entries */
		if (last_e2bit && (i >= 0)) {
			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
					 __fls(n2->maps[i]);
			if (lastsetbit > last_e2bit)
				return 0;
		}

		while (i >= 0) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
			i--;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	if (n2)
		return 0;

	return 1;
}

int ebitmap_get_bit(const struct ebitmap *e, u32 bit)
{
	const struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}

int ebitmap_set_bit(struct ebitmap *e, u32 bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				u32 s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */
				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit +
							     EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;
				kmem_cache_free(ebitmap_node_cachep, n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	if (!value)
		return 0;

	new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}

void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kmem_cache_free(ebitmap_node_cachep, temp);
	}

	e->highbit = 0;
	e->node = NULL;
}

int ebitmap_read(struct ebitmap *e, struct policy_file *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index, i;
	__le32 ebitmap_start;
	u64 map;
	__le64 mapbits;
	__le32 buf[3];
	int rc;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;

	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		pr_err("SELinux: ebitmap: map size %u does not "
		       "match my size %u (high bit was %u)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);
	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}

	if (e->highbit && !count)
		goto bad;

	for (i = 0; i < count; i++) {
		rc = next_entry(&ebitmap_start, fp, sizeof(u32));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(ebitmap_start);

		if (startbit & (mapunit - 1)) {
			pr_err("SELinux: ebitmap start bit (%u) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			pr_err("SELinux: ebitmap start bit (%u) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}

		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;
			tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
			if (!tmp) {
				pr_err("SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			pr_err("SELinux: ebitmap: start bit %u"
			       " comes after start bit %u\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&mapbits, fp, sizeof(u64));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(mapbits);

		if (!map) {
			pr_err("SELinux: ebitmap: empty map\n");
			goto bad;
		}

		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}

	if (n && n->startbit + EBITMAP_SIZE != e->highbit) {
		pr_err("SELinux: ebitmap: high bit %u is not equal to the expected value %zu\n",
		       e->highbit, n->startbit + EBITMAP_SIZE);
		goto bad;
	}

ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}

int ebitmap_write(const struct ebitmap *e, struct policy_file *fp)
{
	struct ebitmap_node *n;
	u32 bit, count, last_bit, last_startbit;
	__le32 buf[3];
	u64 map;
	int rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = U32_MAX;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (last_startbit == U32_MAX ||
		    rounddown(bit, BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);

	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;

	map = 0;
	last_startbit = U32_MAX;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (last_startbit == U32_MAX ||
		    rounddown(bit, BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}

	return 0;
}

u32 ebitmap_hash(const struct ebitmap *e, u32 hash)
{
	struct ebitmap_node *node;

	/* need to change hash even if ebitmap is empty */
	hash = jhash_1word(e->highbit, hash);
	for (node = e->node; node; node = node->next) {
		hash = jhash_1word(node->startbit, hash);
		hash = jhash(node->maps, sizeof(node->maps), hash);
	}
	return hash;
}

void __init ebitmap_cache_init(void)
{
	ebitmap_node_cachep = KMEM_CACHE(ebitmap_node, SLAB_PANIC);
}
3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 | // SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros 
Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> #include <linux/property.h> #include <linux/dmi.h> #include <linux/ctype.h> #include <linux/pm_qos.h> #include <linux/nvmem-consumer.h> #include <asm/byteorder.h> #include "core.h" #include "mac.h" #include "htc.h" #include "hif.h" #include "wmi.h" #include "bmi.h" #include "debug.h" #include "htt.h" #include "testmode.h" #include "wmi-ops.h" #include "coredump.h" #include "leds.h" unsigned int ath10k_debug_mask; EXPORT_SYMBOL(ath10k_debug_mask); static unsigned int ath10k_cryptmode_param; static bool uart_print; static bool skip_otp; static bool fw_diag_log; /* frame mode values are mapped as per enum ath10k_hw_txrx_mode */ unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI; unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) | BIT(ATH10K_FW_CRASH_DUMP_CE_DATA); /* FIXME: most of these should be readonly */ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param(uart_print, bool, 0644); module_param(skip_otp, bool, 0644); module_param(fw_diag_log, bool, 0644); module_param_named(frame_mode, ath10k_frame_mode, uint, 0644); module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444); MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); MODULE_PARM_DESC(frame_mode, "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)"); MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file"); MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = QCA988X_HW_2_0_VERSION, .dev_id = QCA988X_2_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 1, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, .board_size = QCA988X_BOARD_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = true, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA988X_HW_2_0_VERSION, .dev_id = QCA988X_2_0_DEVICE_ID_UBNT, .name = "qca988x hw2.0 ubiquiti", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 0, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, 
.cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, .board_size = QCA988X_BOARD_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = true, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA9887_HW_1_0_VERSION, .dev_id = QCA9887_1_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca9887 hw1.0", .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 1, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 2116, .fw = { .dir = QCA9887_HW_1_0_FW_DIR, .board_size = QCA9887_BOARD_DATA_SZ, .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA6174_HW_3_2_VERSION, .dev_id = QCA6174_3_2_DEVICE_ID, .bus = ATH10K_BUS_SDIO, .name = "qca6174 hw3.2 sdio", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 19, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 0, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca6174_sdio_ops, .hw_clk = qca6174_clk, .target_cpu_freq = 176000000, .decap_align_bytes = 4, .n_cipher_suites = 8, .num_peers = 10, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .uart_pin_workaround = true, .tx_stats_over_pktlog = false, .credit_size_workaround = false, .bmi_large_size_download = true, .supports_peer_stats_info = true, .dynamic_sar_support = true, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6164_2_1_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca6164 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, 
.n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6174_2_1_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA6174_HW_3_0_VERSION, .dev_id = QCA6174_2_1_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA6174_HW_3_2_VERSION, .dev_id = QCA6174_2_1_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca6174_ops, .hw_clk = qca6174_clk, .target_cpu_freq = 176000000, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = 
false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .supports_peer_stats_info = true, .dynamic_sar_support = true, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = true, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, .dev_id = QCA99X0_2_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca99x0 hw2.0", .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 17, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .tx_chain_mask = 0xf, .rx_chain_mask = 0xf, .max_spatial_stream = 4, .cal_data_len = 12064, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &qca99x0_rx_desc_ops, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 4, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, .dev_id = QCA9984_1_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca9984/qca9994 hw1.0", .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 17, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .tx_chain_mask = 0xf, .rx_chain_mask = 0xf, .max_spatial_stream = 4, .cal_data_len = 12064, .fw = { .dir = QCA9984_HW_1_0_FW_DIR, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &qca99x0_rx_desc_ops, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 12, .spectral_bin_offset = 8, /* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz * or 2x2 160Mhz, long-guard-interval. 
*/ .vht160_mcs_rx_highest = 1560, .vht160_mcs_tx_highest = 1560, .n_cipher_suites = 11, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, .dev_id = QCA9888_2_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca9888 hw2.0", .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 17, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .tx_chain_mask = 3, .rx_chain_mask = 3, .max_spatial_stream = 2, .cal_data_len = 12064, .fw = { .dir = QCA9888_HW_2_0_FW_DIR, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &qca99x0_rx_desc_ops, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 12, .spectral_bin_offset = 8, /* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or * 1x1 160Mhz, long-guard-interval. */ .vht160_mcs_rx_highest = 780, .vht160_mcs_tx_highest = 780, .n_cipher_suites = 11, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca9377 hw1.0", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 6, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .bus = ATH10K_BUS_PCI, .name = "qca9377 hw1.1", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 6, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca6174_ops, .hw_clk = qca6174_clk, 
.target_cpu_freq = 176000000, .decap_align_bytes = 4, .spectral_bin_discard = 0, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .bus = ATH10K_BUS_SDIO, .name = "qca9377 hw1.1 sdio", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 19, .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, .rx_desc_ops = &qca988x_rx_desc_ops, .hw_ops = &qca6174_ops, .hw_clk = qca6174_clk, .target_cpu_freq = 176000000, .decap_align_bytes = 4, .n_cipher_suites = 8, .num_peers = TARGET_QCA9377_HL_NUM_PEERS, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .uart_pin_workaround = true, .credit_size_workaround = true, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = QCA4019_HW_1_0_DEV_VERSION, .dev_id = 0, .bus = ATH10K_BUS_AHB, .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, .led_pin = 0, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x0010000, .continuous_frag_desc = true, .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .tx_chain_mask = 0x3, .rx_chain_mask = 0x3, .max_spatial_stream = 2, .cal_data_len = 12064, .fw = { .dir = QCA4019_HW_1_0_FW_DIR, .board_size = QCA4019_BOARD_DATA_SZ, .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &qca99x0_rx_desc_ops, .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 4, .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, .shadow_reg_support = false, .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, .hw_restart_disconnect = false, .use_fw_tx_credits = true, .delay_unmap_buffer = false, .mcast_frame_registration = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, .dev_id = 0, .bus = ATH10K_BUS_SNOC, .name = "wcn3990 hw1.0", .led_pin = 0, .continuous_frag_desc = true, .tx_chain_mask = 0x7, .rx_chain_mask = 0x7, .max_spatial_stream = 4, .fw = { .dir = WCN3990_HW_1_0_FW_DIR, .board_size = WCN3990_BOARD_DATA_SZ, .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &wcn3990_rx_desc_ops, .hw_ops = &wcn3990_ops, .decap_align_bytes = 1, .num_peers = TARGET_HL_TLV_NUM_PEERS, .n_cipher_suites = 11, .ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT, .num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES, .target_64bit = true, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, .shadow_reg_support = true, 
.rri_on_ddr = true, .hw_filter_reset_required = false, .fw_diag_ce_download = false, .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = true, .hw_restart_disconnect = true, .use_fw_tx_credits = false, .delay_unmap_buffer = true, .mcast_frame_registration = false, }, }; static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx", [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x", [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx", [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p", [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2", [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps", [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan", [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp", [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad", [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init", [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war", [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast", [ATH10K_FW_FEATURE_NO_PS] = "no-ps", [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference", [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi", [ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel", [ATH10K_FW_FEATURE_PEER_FIXED_RATE] = "peer-fixed-rate", [ATH10K_FW_FEATURE_IRAM_RECOVERY] = "iram-recovery", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, size_t buf_len, enum ath10k_fw_features feat) { /* make sure that ath10k_core_fw_feature_str[] gets updated */ BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) != ATH10K_FW_FEATURE_COUNT); if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) || WARN_ON(!ath10k_core_fw_feature_str[feat])) { return scnprintf(buf, buf_len, "bit%d", feat); } return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]); } void ath10k_core_get_fw_features_str(struct ath10k *ar, char *buf, size_t buf_len) { size_t len = 0; int i; for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) { if (len > 0) len += scnprintf(buf + len, buf_len - len, ","); len += ath10k_core_get_fw_feature_str(buf + len, buf_len - len, i); } } } static void ath10k_send_suspend_complete(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); complete(&ar->target_suspend); } static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) { bool mtu_workaround = ar->hw_params.credit_size_workaround; int ret; u32 param = 0; ret = ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256); if (ret) return ret; ret = ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99); if (ret) return ret; ret = ath10k_bmi_read32(ar, hi_acs_flags, &param); if (ret) return ret; param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround) param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; else param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; if (mode == ATH10K_FIRMWARE_MODE_UTF) param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET; else param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET; ret = ath10k_bmi_write32(ar, hi_acs_flags, param); if (ret) return ret; ret = ath10k_bmi_read32(ar, hi_option_flag2, &param); if (ret) return ret; param |= HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST; ret = ath10k_bmi_write32(ar,
hi_option_flag2, param); if (ret) return ret; return 0; } static int ath10k_init_configure_target(struct ath10k *ar) { u32 param_host; int ret; /* tell the target which HTC version is used */ ret = ath10k_bmi_write32(ar, hi_app_host_interest, HTC_PROTOCOL_VERSION); if (ret) { ath10k_err(ar, "settings HTC version failed\n"); return ret; } /* set the firmware mode to STA/IBSS/AP */ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host); if (ret) { ath10k_err(ar, "setting firmware mode (1/2) failed\n"); return ret; } /* TODO following parameters need to be re-visited. */ /* num_device */ param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT); /* Firmware mode */ /* FIXME: Why FW_MODE_AP ??.*/ param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT); /* mac_addr_method */ param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); /* firmware_bridge */ param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); /* fwsubmode */ param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT); ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); if (ret) { ath10k_err(ar, "setting firmware mode (2/2) failed\n"); return ret; } /* We do all byte-swapping on the host */ ret = ath10k_bmi_write32(ar, hi_be, 0); if (ret) { ath10k_err(ar, "setting host CPU BE mode failed\n"); return ret; } /* FW descriptor/Data swap flags */ ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); if (ret) { ath10k_err(ar, "setting FW data/desc swap flags failed\n"); return ret; } /* Some devices have a special sanity check that verifies the PCI * Device ID is written to this host interest var. It is known to be * required to boot QCA6164. */ ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext, ar->dev_id); if (ret) { ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret); return ret; } return 0; } static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, const char *dir, const char *file) { char filename[100]; const struct firmware *fw; int ret; if (file == NULL) return ERR_PTR(-ENOENT); if (dir == NULL) dir = "."; if (ar->board_name) { snprintf(filename, sizeof(filename), "%s/%s/%s", dir, ar->board_name, file); ret = firmware_request_nowarn(&fw, filename, ar->dev); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", filename, ret); if (!ret) return fw; } snprintf(filename, sizeof(filename), "%s/%s", dir, file); ret = firmware_request_nowarn(&fw, filename, ar->dev); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", filename, ret); if (ret) return ERR_PTR(ret); return fw; } static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, size_t data_len) { u32 board_data_size = ar->hw_params.fw.board_size; u32 board_ext_data_size = ar->hw_params.fw.board_ext_size; u32 board_ext_data_addr; int ret; ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); if (ret) { ath10k_err(ar, "could not read board ext data addr (%d)\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot push board extended data addr 0x%x\n", board_ext_data_addr); if (board_ext_data_addr == 0) return 0; if (data_len != (board_data_size + board_ext_data_size)) { ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", data_len, board_data_size, board_ext_data_size); return -EINVAL; } ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, data + board_data_size, board_ext_data_size); if (ret) { ath10k_err(ar, "could not write board ext data (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, (board_ext_data_size << 16) | 1); if (ret) { ath10k_err(ar, "could not write board ext data bit 
(%d)\n", ret); return ret; } return 0; } static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; u8 board_id, chip_id; bool ext_bid_support; int ret, bmi_board_id_param; address = ar->hw_params.patch_load_addr; if (!ar->normal_mode_fw.fw_file.otp_data || !ar->normal_mode_fw.fw_file.otp_len) { ath10k_warn(ar, "failed to retrieve board id because of invalid otp\n"); return -ENODATA; } if (ar->id.bmi_ids_valid) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot already acquired valid otp board id,skip download, board_id %d chip_id %d\n", ar->id.bmi_board_id, ar->id.bmi_chip_id); goto skip_otp_download; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd for board id\n", address, ar->normal_mode_fw.fw_file.otp_len); ret = ath10k_bmi_fast_download(ar, address, ar->normal_mode_fw.fw_file.otp_data, ar->normal_mode_fw.fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp for board id check: %d\n", ret); return ret; } if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID; else bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID; ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result); if (ret) { ath10k_err(ar, "could not execute otp for board id check: %d\n", ret); return ret; } board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP); chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP); ext_bid_support = (result & ATH10K_BMI_EXT_BOARD_ID_SUPPORT); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot get otp board id result 0x%08x board_id %d chip_id %d ext_bid_support %d\n", result, board_id, chip_id, ext_bid_support); ar->id.ext_bid_supported = ext_bid_support; if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || (board_id == 0)) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "board id does not exist in otp, ignore it\n"); return -EOPNOTSUPP; } ar->id.bmi_ids_valid = true; ar->id.bmi_board_id = board_id; ar->id.bmi_chip_id = chip_id; skip_otp_download: return 0; } static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data) { struct ath10k *ar = data; const char *bdf_ext; const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC; u8 bdf_enabled; int i; if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE) return; if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "wrong smbios bdf ext type length (%d).\n", hdr->length); return; } bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET); if (!bdf_enabled) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n"); return; } /* Only one string exists (per spec) */ bdf_ext = (char *)hdr + hdr->length; if (memcmp(bdf_ext, magic, strlen(magic)) != 0) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant magic does not match.\n"); return; } for (i = 0; i < strlen(bdf_ext); i++) { if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name contains non ascii chars.\n"); return; } } /* Copy extension name without magic suffix */ if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic), sizeof(ar->id.bdf_ext)) < 0) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", bdf_ext); return; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "found and validated bdf variant smbios_type 0x%x bdf %s\n", ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext); } static int ath10k_core_check_smbios(struct ath10k *ar) { ar->id.bdf_ext[0] = '\0'; dmi_walk(ath10k_core_check_bdfext, ar); if (ar->id.bdf_ext[0] == '\0') return 
-ENODATA; return 0; } int ath10k_core_check_dt(struct ath10k *ar) { struct device_node *node; const char *variant = NULL; node = ar->dev->of_node; if (!node) return -ENOENT; of_property_read_string(node, "qcom,calibration-variant", &variant); if (!variant) of_property_read_string(node, "qcom,ath10k-calibration-variant", &variant); if (!variant) return -ENODATA; if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0) ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", variant); return 0; } EXPORT_SYMBOL(ath10k_core_check_dt); static int ath10k_download_fw(struct ath10k *ar) { u32 address, data_len; const void *data; int ret; struct pm_qos_request latency_qos; address = ar->hw_params.patch_load_addr; data = ar->running_fw->fw_file.firmware_data; data_len = ar->running_fw->fw_file.firmware_len; ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file); if (ret) { ath10k_err(ar, "failed to configure fw code swap: %d\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot uploading firmware image %p len %d\n", data, data_len); /* Check if device supports to download firmware via * diag copy engine. Downloading firmware via diag CE * greatly reduces the time to download firmware. */ if (ar->hw_params.fw_diag_ce_download) { ret = ath10k_hw_diag_fast_download(ar, address, data, data_len); if (ret == 0) /* firmware upload via diag ce was successful */ return 0; ath10k_warn(ar, "failed to upload firmware via diag ce, trying BMI: %d", ret); } memset(&latency_qos, 0, sizeof(latency_qos)); cpu_latency_qos_add_request(&latency_qos, 0); ret = ath10k_bmi_fast_download(ar, address, data, data_len); cpu_latency_qos_remove_request(&latency_qos); return ret; } void ath10k_core_free_board_files(struct ath10k *ar) { if (!IS_ERR(ar->normal_mode_fw.board)) release_firmware(ar->normal_mode_fw.board); if (!IS_ERR(ar->normal_mode_fw.ext_board)) release_firmware(ar->normal_mode_fw.ext_board); ar->normal_mode_fw.board = NULL; ar->normal_mode_fw.board_data = NULL; ar->normal_mode_fw.board_len = 0; ar->normal_mode_fw.ext_board = NULL; ar->normal_mode_fw.ext_board_data = NULL; ar->normal_mode_fw.ext_board_len = 0; } EXPORT_SYMBOL(ath10k_core_free_board_files); static void ath10k_core_free_firmware_files(struct ath10k *ar) { if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware)) release_firmware(ar->normal_mode_fw.fw_file.firmware); if (!IS_ERR(ar->cal_file)) release_firmware(ar->cal_file); if (!IS_ERR(ar->pre_cal_file)) release_firmware(ar->pre_cal_file); ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file); ar->normal_mode_fw.fw_file.otp_data = NULL; ar->normal_mode_fw.fw_file.otp_len = 0; ar->normal_mode_fw.fw_file.firmware = NULL; ar->normal_mode_fw.fw_file.firmware_data = NULL; ar->normal_mode_fw.fw_file.firmware_len = 0; ar->cal_file = NULL; ar->pre_cal_file = NULL; } static int ath10k_fetch_cal_file(struct ath10k *ar) { char filename[100]; /* pre-cal-<bus>-<id>.bin */ scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin", ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); if (!IS_ERR(ar->pre_cal_file)) goto success; /* cal-<bus>-<id>.bin */ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); if (IS_ERR(ar->cal_file)) /* calibration file is optional, don't print any warnings */ return PTR_ERR(ar->cal_file); success: ath10k_dbg(ar, 
ATH10K_DBG_BOOT, "found calibration file %s/%s\n", ATH10K_FW_DIR, filename); return 0; } static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) { const struct firmware *fw; char boardname[100]; if (bd_ie_type == ATH10K_BD_IE_BOARD) { scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin", ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, boardname); if (IS_ERR(ar->normal_mode_fw.board)) { fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, ATH10K_BOARD_DATA_FILE); ar->normal_mode_fw.board = fw; } if (IS_ERR(ar->normal_mode_fw.board)) return PTR_ERR(ar->normal_mode_fw.board); ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, ATH10K_EBOARD_DATA_FILE); ar->normal_mode_fw.ext_board = fw; if (IS_ERR(ar->normal_mode_fw.ext_board)) return PTR_ERR(ar->normal_mode_fw.ext_board); ar->normal_mode_fw.ext_board_data = ar->normal_mode_fw.ext_board->data; ar->normal_mode_fw.ext_board_len = ar->normal_mode_fw.ext_board->size; } return 0; } static int ath10k_core_parse_bd_ie_board(struct ath10k *ar, const void *buf, size_t buf_len, const char *boardname, int bd_ie_type) { const struct ath10k_fw_ie *hdr; bool name_match_found; int ret, board_ie_id; size_t board_ie_len; const void *board_ie_data; name_match_found = false; /* go through ATH10K_BD_IE_BOARD_ elements */ while (buf_len > sizeof(struct ath10k_fw_ie)) { hdr = buf; board_ie_id = le32_to_cpu(hdr->id); board_ie_len = le32_to_cpu(hdr->len); board_ie_data = hdr->data; buf_len -= sizeof(*hdr); buf += sizeof(*hdr); if (buf_len < ALIGN(board_ie_len, 4)) { ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n", buf_len, ALIGN(board_ie_len, 4)); ret = -EINVAL; goto out; } switch (board_ie_id) { case ATH10K_BD_IE_BOARD_NAME: ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "", board_ie_data, board_ie_len); if (board_ie_len != strlen(boardname)) break; ret = memcmp(board_ie_data, boardname, strlen(boardname)); if (ret) break; name_match_found = true; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found match for name '%s'", boardname); break; case ATH10K_BD_IE_BOARD_DATA: if (!name_match_found) /* no match found */ break; if (bd_ie_type == ATH10K_BD_IE_BOARD) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found board data for '%s'", boardname); ar->normal_mode_fw.board_data = board_ie_data; ar->normal_mode_fw.board_len = board_ie_len; } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found eboard data for '%s'", boardname); ar->normal_mode_fw.ext_board_data = board_ie_data; ar->normal_mode_fw.ext_board_len = board_ie_len; } ret = 0; goto out; default: ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n", board_ie_id); break; } /* jump over the padding */ board_ie_len = ALIGN(board_ie_len, 4); buf_len -= board_ie_len; buf += board_ie_len; } /* no match found */ ret = -ENOENT; out: return ret; } static int ath10k_core_search_bd(struct ath10k *ar, const char *boardname, const u8 *data, size_t len) { size_t ie_len; struct ath10k_fw_ie *hdr; int ret = -ENOENT, ie_id; while (len > sizeof(struct ath10k_fw_ie)) { hdr = (struct ath10k_fw_ie *)data; ie_id = le32_to_cpu(hdr->id); ie_len = le32_to_cpu(hdr->len); len -= sizeof(*hdr); data = hdr->data; if (len < ALIGN(ie_len, 4)) { ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", 
ie_id, ie_len, len); return -EINVAL; } switch (ie_id) { case ATH10K_BD_IE_BOARD: ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, boardname, ATH10K_BD_IE_BOARD); if (ret == -ENOENT) /* no match found, continue */ break; /* either found or error, so stop searching */ goto out; case ATH10K_BD_IE_BOARD_EXT: ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, boardname, ATH10K_BD_IE_BOARD_EXT); if (ret == -ENOENT) /* no match found, continue */ break; /* either found or error, so stop searching */ goto out; } /* jump over the padding */ ie_len = ALIGN(ie_len, 4); len -= ie_len; data += ie_len; } out: /* return result of parse_bd_ie_board() or -ENOENT */ return ret; } static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const char *boardname, const char *fallback_boardname1, const char *fallback_boardname2, const char *filename) { size_t len, magic_len; const u8 *data; int ret; /* Skip if already fetched during board data download */ if (!ar->normal_mode_fw.board) ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename); if (IS_ERR(ar->normal_mode_fw.board)) return PTR_ERR(ar->normal_mode_fw.board); data = ar->normal_mode_fw.board->data; len = ar->normal_mode_fw.board->size; /* magic has extra null byte padded */ magic_len = strlen(ATH10K_BOARD_MAGIC) + 1; if (len < magic_len) { ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n", ar->hw_params.fw.dir, filename, len); ret = -EINVAL; goto err; } if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) { ath10k_err(ar, "found invalid board magic\n"); ret = -EINVAL; goto err; } /* magic is padded to 4 bytes */ magic_len = ALIGN(magic_len, 4); if (len < magic_len) { ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n", ar->hw_params.fw.dir, filename, len); ret = -EINVAL; goto err; } data += magic_len; len -= magic_len; /* attempt to find boardname in the IE list */ ret = ath10k_core_search_bd(ar, boardname, data, len); /* if we didn't find it and have a fallback name, try that */ if (ret == -ENOENT && fallback_boardname1) ret = ath10k_core_search_bd(ar, fallback_boardname1, data, len); if (ret == -ENOENT && fallback_boardname2) ret = ath10k_core_search_bd(ar, fallback_boardname2, data, len); if (ret == -ENOENT) { ath10k_err(ar, "failed to fetch board data for %s from %s/%s\n", boardname, ar->hw_params.fw.dir, filename); ret = -ENODATA; } if (ret) goto err; return 0; err: ath10k_core_free_board_files(ar); return ret; } static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len, bool with_variant, bool with_chip_id) { /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; if (with_variant && ar->id.bdf_ext[0] != '\0') scnprintf(variant, sizeof(variant), ",variant=%s", ar->id.bdf_ext); if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s", ath10k_bus_str(ar->hif.bus), ar->id.bmi_chip_id, ar->id.bmi_board_id, variant); goto out; } if (ar->id.qmi_ids_valid) { if (with_chip_id) scnprintf(name, name_len, "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s", ath10k_bus_str(ar->hif.bus), ar->id.qmi_board_id, ar->id.qmi_chip_id, variant); else scnprintf(name, name_len, "bus=%s,qmi-board-id=%x", ath10k_bus_str(ar->hif.bus), ar->id.qmi_board_id); goto out; } scnprintf(name, name_len, "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", ath10k_bus_str(ar->hif.bus), ar->id.vendor, ar->id.device, ar->id.subsystem_vendor, 
ar->id.subsystem_device, variant); out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name); return 0; } static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name, size_t name_len) { if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d", ath10k_bus_str(ar->hif.bus), ar->id.bmi_chip_id, ar->id.bmi_eboard_id); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using eboard name '%s'\n", name); return 0; } /* Fallback if returned board id is zero */ return -1; } int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) { char boardname[100], fallback_boardname1[100], fallback_boardname2[100]; int ret; if (bd_ie_type == ATH10K_BD_IE_BOARD) { /* With variant and chip id */ ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname), true, true); if (ret) { ath10k_err(ar, "failed to create board name: %d", ret); return ret; } /* Without variant and only chip-id */ ret = ath10k_core_create_board_name(ar, fallback_boardname1, sizeof(boardname), false, true); if (ret) { ath10k_err(ar, "failed to create 1st fallback board name: %d", ret); return ret; } /* Without variant and without chip-id */ ret = ath10k_core_create_board_name(ar, fallback_boardname2, sizeof(boardname), false, false); if (ret) { ath10k_err(ar, "failed to create 2nd fallback board name: %d", ret); return ret; } } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { ret = ath10k_core_create_eboard_name(ar, boardname, sizeof(boardname)); if (ret) { ath10k_err(ar, "fallback to eboard.bin since board id 0"); goto fallback; } } ar->bd_api = 2; ret = ath10k_core_fetch_board_data_api_n(ar, boardname, fallback_boardname1, fallback_boardname2, ATH10K_BOARD_API2_FILE); if (!ret) goto success; fallback: ar->bd_api = 1; ret = ath10k_core_fetch_board_data_api_1(ar, bd_ie_type); if (ret) { ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n", ar->hw_params.fw.dir); return ret; } success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api); return 0; } EXPORT_SYMBOL(ath10k_core_fetch_board_file); static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar) { u32 result, address; u8 ext_board_id; int ret; address = ar->hw_params.patch_load_addr; if (!ar->normal_mode_fw.fw_file.otp_data || !ar->normal_mode_fw.fw_file.otp_len) { ath10k_warn(ar, "failed to retrieve extended board id due to otp binary missing\n"); return -ENODATA; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd for ext board id\n", address, ar->normal_mode_fw.fw_file.otp_len); ret = ath10k_bmi_fast_download(ar, address, ar->normal_mode_fw.fw_file.otp_data, ar->normal_mode_fw.fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp for ext board id check: %d\n", ret); return ret; } ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EXT_BOARD_ID, &result); if (ret) { ath10k_err(ar, "could not execute otp for ext board id check: %d\n", ret); return ret; } if (!result) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "ext board id does not exist in otp, ignore it\n"); return -EOPNOTSUPP; } ext_board_id = result & ATH10K_BMI_EBOARD_ID_STATUS_MASK; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot get otp ext board id result 0x%08x ext_board_id %d\n", result, ext_board_id); ar->id.bmi_eboard_id = ext_board_id; return 0; } static int ath10k_download_board_data(struct ath10k *ar, const void *data, size_t data_len) { u32 board_data_size = ar->hw_params.fw.board_size; u32 eboard_data_size = ar->hw_params.fw.ext_board_size; u32 board_address; u32 ext_board_address; int ret; ret = 
ath10k_push_board_ext_data(ar, data, data_len); if (ret) { ath10k_err(ar, "could not push board ext data (%d)\n", ret); goto exit; } ret = ath10k_bmi_read32(ar, hi_board_data, &board_address); if (ret) { ath10k_err(ar, "could not read board data addr (%d)\n", ret); goto exit; } ret = ath10k_bmi_write_memory(ar, board_address, data, min_t(u32, board_data_size, data_len)); if (ret) { ath10k_err(ar, "could not write board data (%d)\n", ret); goto exit; } ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); if (ret) { ath10k_err(ar, "could not write board data bit (%d)\n", ret); goto exit; } if (!ar->id.ext_bid_supported) goto exit; /* Extended board data download */ ret = ath10k_core_get_ext_board_id_from_otp(ar); if (ret == -EOPNOTSUPP) { /* Not fetching ext_board_data if ext board id is 0 */ ath10k_dbg(ar, ATH10K_DBG_BOOT, "otp returned ext board id 0\n"); return 0; } else if (ret) { ath10k_err(ar, "failed to get extended board id: %d\n", ret); goto exit; } ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD_EXT); if (ret) goto exit; if (ar->normal_mode_fw.ext_board_data) { ext_board_address = board_address + EXT_BOARD_ADDRESS_OFFSET; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot writing ext board data to addr 0x%x", ext_board_address); ret = ath10k_bmi_write_memory(ar, ext_board_address, ar->normal_mode_fw.ext_board_data, min_t(u32, eboard_data_size, data_len)); if (ret) ath10k_err(ar, "failed to write ext board data: %d\n", ret); } exit: return ret; } static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; int ret; ret = ath10k_download_board_data(ar, ar->running_fw->board_data, ar->running_fw->board_len); if (ret) { ath10k_err(ar, "failed to download board data: %d\n", ret); return ret; } /* OTP is optional */ if (!ar->running_fw->fw_file.otp_data || !ar->running_fw->fw_file.otp_len) { ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", ar->running_fw->fw_file.otp_data, ar->running_fw->fw_file.otp_len); return 0; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", address, ar->running_fw->fw_file.otp_len); ret = ath10k_bmi_fast_download(ar, address, ar->running_fw->fw_file.otp_data, ar->running_fw->fw_file.otp_len); if (ret) { ath10k_err(ar, "could not write otp (%d)\n", ret); return ret; } /* As of now pre-cal is valid for 10_4 variants */ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); if (ret) { ath10k_err(ar, "could not execute otp (%d)\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, ar->running_fw->fw_file.fw_features)) && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } return 0; } static int ath10k_download_cal_file(struct ath10k *ar, const struct firmware *file) { int ret; if (!file) return -ENOENT; if (IS_ERR(file)) return PTR_ERR(file); ret = ath10k_download_board_data(ar, file->data, file->size); if (ret) { ath10k_err(ar, "failed to download cal_file data: %d\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n"); return 0; } static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name) { struct device_node *node; 
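/* The DT calibration property read below must be exactly hw_params.cal_data_len bytes; any other length is rejected with -EMSGSIZE before the blob is pushed to the target as board data. */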
int data_len; void *data; int ret; node = ar->dev->of_node; if (!node) /* Device Tree is optional, don't print any warnings if * there's no node for ath10k. */ return -ENOENT; if (!of_get_property(node, dt_name, &data_len)) { /* The calibration data node is optional */ return -ENOENT; } if (data_len != ar->hw_params.cal_data_len) { ath10k_warn(ar, "invalid calibration data length in DT: %d\n", data_len); ret = -EMSGSIZE; goto out; } data = kmalloc(data_len, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto out; } ret = of_property_read_u8_array(node, dt_name, data, data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data from DT: %d\n", ret); goto out_free; } ret = ath10k_download_board_data(ar, data, data_len); if (ret) { ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n", ret); goto out_free; } ret = 0; out_free: kfree(data); out: return ret; } static int ath10k_download_cal_eeprom(struct ath10k *ar) { size_t data_len; void *data = NULL; int ret; ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); if (ret) { if (ret != -EOPNOTSUPP) ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", ret); goto out_free; } ret = ath10k_download_board_data(ar, data, data_len); if (ret) { ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", ret); goto out_free; } ret = 0; out_free: kfree(data); return ret; } static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name) { struct nvmem_cell *cell; void *buf; size_t len; int ret; cell = devm_nvmem_cell_get(ar->dev, cell_name); if (IS_ERR(cell)) { ret = PTR_ERR(cell); return ret; } buf = nvmem_cell_read(cell, &len); if (IS_ERR(buf)) return PTR_ERR(buf); if (ar->hw_params.cal_data_len != len) { kfree(buf); ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n", cell_name, len, ar->hw_params.cal_data_len); return -EMSGSIZE; } ret = ath10k_download_board_data(ar, buf, len); kfree(buf); if (ret) ath10k_warn(ar, "failed to download calibration data from nvmem-cell '%s': %d\n", cell_name, ret); return ret; } int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, struct ath10k_fw_file *fw_file) { size_t magic_len, len, ie_len; int ie_id, i, index, bit, ret; struct ath10k_fw_ie *hdr; const u8 *data; __le32 *timestamp, *version; /* first fetch the firmware file (firmware-*.bin) */ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); if (IS_ERR(fw_file->firmware)) return PTR_ERR(fw_file->firmware); data = fw_file->firmware->data; len = fw_file->firmware->size; /* magic also includes the null byte, check that as well */ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; if (len < magic_len) { ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n", ar->hw_params.fw.dir, name, len); ret = -EINVAL; goto err; } if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { ath10k_err(ar, "invalid firmware magic\n"); ret = -EINVAL; goto err; } /* jump over the padding */ magic_len = ALIGN(magic_len, 4); len -= magic_len; data += magic_len; /* loop elements */ while (len > sizeof(struct ath10k_fw_ie)) { hdr = (struct ath10k_fw_ie *)data; ie_id = le32_to_cpu(hdr->id); ie_len = le32_to_cpu(hdr->len); len -= sizeof(*hdr); data += sizeof(*hdr); if (len < ie_len) { ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", ie_id, len, ie_len); ret = -EINVAL; goto err; } switch (ie_id) { case ATH10K_FW_IE_FW_VERSION: if (ie_len > sizeof(fw_file->fw_version) - 1) break; memcpy(fw_file->fw_version, data, ie_len); 
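/* ie_len was checked against sizeof(fw_file->fw_version) - 1 above, so there is room for the terminating NUL appended next. */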
fw_file->fw_version[ie_len] = '\0'; ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw version %s\n", fw_file->fw_version); break; case ATH10K_FW_IE_TIMESTAMP: if (ie_len != sizeof(u32)) break; timestamp = (__le32 *)data; ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n", le32_to_cpup(timestamp)); break; case ATH10K_FW_IE_FEATURES: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found firmware features ie (%zd B)\n", ie_len); for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { index = i / 8; bit = i % 8; if (index == ie_len) break; if (data[index] & (1 << bit)) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "Enabling feature bit: %i\n", i); __set_bit(i, fw_file->fw_features); } } ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", fw_file->fw_features, sizeof(fw_file->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw image ie (%zd B)\n", ie_len); fw_file->firmware_data = data; fw_file->firmware_len = ie_len; break; case ATH10K_FW_IE_OTP_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found otp image ie (%zd B)\n", ie_len); fw_file->otp_data = data; fw_file->otp_len = ie_len; break; case ATH10K_FW_IE_WMI_OP_VERSION: if (ie_len != sizeof(u32)) break; version = (__le32 *)data; fw_file->wmi_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n", fw_file->wmi_op_version); break; case ATH10K_FW_IE_HTT_OP_VERSION: if (ie_len != sizeof(u32)) break; version = (__le32 *)data; fw_file->htt_op_version = le32_to_cpup(version); ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n", fw_file->htt_op_version); break; case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE: ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw code swap image ie (%zd B)\n", ie_len); fw_file->codeswap_data = data; fw_file->codeswap_len = ie_len; break; default: ath10k_warn(ar, "Unknown FW IE: %u\n", le32_to_cpu(hdr->id)); break; } /* jump over the padding */ ie_len = ALIGN(ie_len, 4); len -= ie_len; data += ie_len; } if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) && (!fw_file->firmware_data || !fw_file->firmware_len)) { ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", ar->hw_params.fw.dir, name); ret = -ENOMEDIUM; goto err; } return 0; err: ath10k_core_free_firmware_files(ar); return ret; } static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, size_t fw_name_len, int fw_api) { switch (ar->hif.bus) { case ATH10K_BUS_SDIO: case ATH10K_BUS_USB: scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin", ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus), fw_api); break; case ATH10K_BUS_PCI: case ATH10K_BUS_AHB: case ATH10K_BUS_SNOC: scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api); break; } } static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { int ret, i; char fw_name[100]; /* calibration file is optional, don't check for any errors */ ath10k_fetch_cal_file(ar); for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) { ar->fw_api = i; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api); ret = ath10k_core_fetch_firmware_api_n(ar, fw_name, &ar->normal_mode_fw.fw_file); if (!ret) goto success; } /* we end up here if we couldn't fetch any firmware */ ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d", ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir, ret); return ret; success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); return 0; } static int ath10k_core_pre_cal_download(struct 
ath10k *ar) { int ret; ret = ath10k_download_cal_nvmem(ar, "pre-calibration"); if (ret == 0) { ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM; goto success; } else if (ret == -EPROBE_DEFER) { return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot did not find a pre-calibration nvmem-cell, try file next: %d\n", ret); ret = ath10k_download_cal_file(ar, ar->pre_cal_file); if (ret == 0) { ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE; goto success; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot did not find a pre calibration file, try DT next: %d\n", ret); ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data"); if (ret == -ENOENT) ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); if (ret) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "unable to load pre cal data from DT: %d\n", ret); return ret; } ar->cal_mode = ATH10K_PRE_CAL_MODE_DT; success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", ath10k_cal_mode_str(ar->cal_mode)); return 0; } static int ath10k_core_pre_cal_config(struct ath10k *ar) { int ret; ret = ath10k_core_pre_cal_download(ar); if (ret) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "failed to load pre cal data: %d\n", ret); return ret; } ret = ath10k_core_get_board_id_from_otp(ar); if (ret) { ath10k_err(ar, "failed to get board id: %d\n", ret); return ret; } ret = ath10k_download_and_run_otp(ar); if (ret) { ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "pre cal configuration done successfully\n"); return 0; } static int ath10k_download_cal_data(struct ath10k *ar) { int ret; ret = ath10k_core_pre_cal_config(ar); if (ret == 0) return 0; ath10k_dbg(ar, ATH10K_DBG_BOOT, "pre cal download procedure failed, try cal file: %d\n", ret); ret = ath10k_download_cal_nvmem(ar, "calibration"); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_NVMEM; goto done; } else if (ret == -EPROBE_DEFER) { return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot did not find a calibration nvmem-cell, try file next: %d\n", ret); ret = ath10k_download_cal_file(ar, ar->cal_file); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_FILE; goto done; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot did not find a calibration file, try DT next: %d\n", ret); ret = ath10k_download_cal_dt(ar, "qcom,calibration-data"); if (ret == -ENOENT) ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_DT; goto done; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot did not find DT entry, try target EEPROM next: %d\n", ret); ret = ath10k_download_cal_eeprom(ar); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_EEPROM; goto done; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot did not find target EEPROM entry, try OTP next: %d\n", ret); ret = ath10k_download_and_run_otp(ar); if (ret) { ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; } ar->cal_mode = ATH10K_CAL_MODE_OTP; done: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", ath10k_cal_mode_str(ar->cal_mode)); return 0; } static void ath10k_core_fetch_btcoex_dt(struct ath10k *ar) { struct device_node *node; u8 coex_support = 0; int ret; node = ar->dev->of_node; if (!node) goto out; ret = of_property_read_u8(node, "qcom,coexist-support", &coex_support); if (ret) { ar->coex_support = true; goto out; } if (coex_support) { ar->coex_support = true; } else { ar->coex_support = false; ar->coex_gpio_pin = -1; goto out; } ret = of_property_read_u32(node, "qcom,coexist-gpio-pin", &ar->coex_gpio_pin); if (ret) ar->coex_gpio_pin = -1; out: ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot coex_support %d 
coex_gpio_pin %d\n", ar->coex_support, ar->coex_gpio_pin); } static int ath10k_init_uart(struct ath10k *ar) { int ret; /* * Explicitly setting UART prints to zero as target turns it on * based on scratch registers. */ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); if (ret) { ath10k_warn(ar, "could not disable UART prints (%d)\n", ret); return ret; } if (!uart_print) { if (ar->hw_params.uart_pin_workaround) { ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin); if (ret) { ath10k_warn(ar, "failed to set UART TX pin: %d", ret); return ret; } } return 0; } ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin); if (ret) { ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); if (ret) { ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } /* Set the UART baud rate to 19200. */ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); if (ret) { ath10k_warn(ar, "could not set the baud rate (%d)\n", ret); return ret; } ath10k_info(ar, "UART prints enabled\n"); return 0; } static int ath10k_init_hw_params(struct ath10k *ar) { const struct ath10k_hw_params *hw_params; int i; for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { hw_params = &ath10k_hw_params_list[i]; if (hw_params->bus == ar->hif.bus && hw_params->id == ar->target_version && hw_params->dev_id == ar->dev_id) break; } if (i == ARRAY_SIZE(ath10k_hw_params_list)) { ath10k_err(ar, "Unsupported hardware version: 0x%x\n", ar->target_version); return -EINVAL; } ar->hw_params = *hw_params; ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", ar->hw_params.name, ar->target_version); return 0; } void ath10k_core_start_recovery(struct ath10k *ar) { if (test_and_set_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags)) { ath10k_warn(ar, "already restarting\n"); return; } queue_work(ar->workqueue, &ar->restart_work); } EXPORT_SYMBOL(ath10k_core_start_recovery); void ath10k_core_napi_enable(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags)) return; napi_enable(&ar->napi); set_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags); } EXPORT_SYMBOL(ath10k_core_napi_enable); void ath10k_core_napi_sync_disable(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags)) return; napi_synchronize(&ar->napi); napi_disable(&ar->napi); clear_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags); } EXPORT_SYMBOL(ath10k_core_napi_sync_disable); static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); int ret; set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); /* Place a barrier to make sure the compiler doesn't reorder * CRASH_FLUSH and calling other functions. 
*/ barrier(); ieee80211_stop_queues(ar->hw); ath10k_drain_tx(ar); complete(&ar->scan.started); complete(&ar->scan.completed); complete(&ar->scan.on_channel); complete(&ar->offchan_tx_completed); complete(&ar->install_key_done); complete(&ar->vdev_setup_done); complete(&ar->vdev_delete_done); complete(&ar->thermal.wmi_sync); complete(&ar->bss_survey_done); wake_up(&ar->htt.empty_tx_wq); wake_up(&ar->wmi.tx_credits_wq); wake_up(&ar->peer_mapping_wq); /* TODO: We can have one instance of cancelling coverage_class_work by * moving it to ath10k_halt(), so that both stop() and restart() would * call that but it takes conf_mutex() and if we call cancel_work_sync() * with conf_mutex it will deadlock. */ cancel_work_sync(&ar->set_coverage_class_work); mutex_lock(&ar->conf_mutex); switch (ar->state) { case ATH10K_STATE_ON: ar->state = ATH10K_STATE_RESTARTING; ath10k_halt(ar); ath10k_scan_finish(ar); ieee80211_restart_hw(ar->hw); break; case ATH10K_STATE_OFF: /* this can happen if driver is being unloaded * or if the crash happens during FW probing */ ath10k_warn(ar, "cannot restart a device that hasn't been started\n"); break; case ATH10K_STATE_RESTARTING: /* hw restart might be requested from multiple places */ break; case ATH10K_STATE_RESTARTED: ar->state = ATH10K_STATE_WEDGED; fallthrough; case ATH10K_STATE_WEDGED: ath10k_warn(ar, "device is wedged, will not restart\n"); break; case ATH10K_STATE_UTF: ath10k_warn(ar, "firmware restart in UTF mode not supported\n"); break; } mutex_unlock(&ar->conf_mutex); ret = ath10k_coredump_submit(ar); if (ret) ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d", ret); complete(&ar->driver_recovery); } static void ath10k_core_set_coverage_class_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, set_coverage_class_work); if (ar->hw_params.hw_ops->set_coverage_class) ar->hw_params.hw_ops->set_coverage_class(ar, -1); } static int ath10k_core_init_firmware_features(struct ath10k *ar) { struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; int max_num_peers; if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) && !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); return -EINVAL; } if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) { ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n", ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version); return -EINVAL; } ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI; switch (ath10k_cryptmode_param) { case ATH10K_CRYPT_MODE_HW: clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); break; case ATH10K_CRYPT_MODE_SW: if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, fw_file->fw_features)) { ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware"); return -EINVAL; } set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); break; default: ath10k_info(ar, "invalid cryptmode: %d\n", ath10k_cryptmode_param); return -EINVAL; } ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT; ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT; if (ath10k_frame_mode == ATH10K_HW_TXRX_RAW) { if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, fw_file->fw_features)) { ath10k_err(ar, "rawmode = 1 requires support from firmware"); return -EINVAL; } set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); } if (test_bit(ATH10K_FLAG_RAW_MODE, 
&ar->dev_flags)) { ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW; /* Workaround: * * Firmware A-MSDU aggregation breaks with RAW Tx encap mode * and causes enormous performance issues (malformed frames, * etc). * * Disabling A-MSDU makes RAW mode stable with heavy traffic * albeit a bit slower compared to regular operation. */ ar->htt.max_num_amsdu = 1; } /* Backwards compatibility for firmwares without * ATH10K_FW_IE_WMI_OP_VERSION. */ if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) { if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) { if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features)) fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2; else fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1; } else { fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN; } } switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: max_num_peers = TARGET_NUM_PEERS; ar->max_num_stations = TARGET_NUM_STATIONS; ar->max_num_vdevs = TARGET_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: if (ath10k_peer_stats_enabled(ar)) { max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; } else { max_num_peers = TARGET_10X_NUM_PEERS; ar->max_num_stations = TARGET_10X_NUM_STATIONS; } ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PEER; ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_TLV: max_num_peers = TARGET_TLV_NUM_PEERS; ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS; if (ar->hif.bus == ATH10K_BUS_SDIO) ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC_HL; else ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV | WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD; ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_10_4: max_num_peers = TARGET_10_4_NUM_PEERS; ar->max_num_stations = TARGET_10_4_NUM_STATIONS; ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | WMI_10_4_STAT_PEER_EXTD | WMI_10_4_STAT_VDEV_EXTD; ar->max_spatial_stream = ar->hw_params.max_spatial_stream; ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS; if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, fw_file->fw_features)) ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC; else ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: default: WARN_ON(1); return -EINVAL; } if (ar->hw_params.num_peers) ar->max_num_peers = ar->hw_params.num_peers; else ar->max_num_peers = max_num_peers; /* Backwards compatibility for firmwares without * ATH10K_FW_IE_HTT_OP_VERSION. 
*/ if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) { switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_MAIN: fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN; break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1; break; case ATH10K_FW_WMI_OP_VERSION_TLV: fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV; break; case ATH10K_FW_WMI_OP_VERSION_10_4: case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: ath10k_err(ar, "htt op version not found from fw meta data"); return -EINVAL; } } return 0; } static int ath10k_core_reset_rx_filter(struct ath10k *ar) { int ret; int vdev_id; int vdev_type; int vdev_subtype; const u8 *vdev_addr; vdev_id = 0; vdev_type = WMI_VDEV_TYPE_STA; vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); vdev_addr = ar->mac_addr; ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype, vdev_addr); if (ret) { ath10k_err(ar, "failed to create dummy vdev: %d\n", ret); return ret; } ret = ath10k_wmi_vdev_delete(ar, vdev_id); if (ret) { ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret); return ret; } /* WMI and HTT may use separate HIF pipes and are not guaranteed to be * serialized properly implicitly. * * Moreover (most) WMI commands have no explicit acknowledgements. It is * possible to infer it implicitly by poking the firmware with an echo * command - getting a reply means all preceding commands have been * (mostly) processed. * * In case of vdev create/delete this is sufficient. * * Without this it's possible to end up with a race when HTT Rx ring is * started before vdev create/delete hack is complete allowing a short * window of opportunity to receive (and Tx ACK) a bunch of frames.
*/ ret = ath10k_wmi_barrier(ar); if (ret) { ath10k_err(ar, "failed to ping firmware: %d\n", ret); return ret; } return 0; } static int ath10k_core_compat_services(struct ath10k *ar) { struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file; /* all 10.x firmware versions support thermal throttling but don't * advertise the support via service flags so we have to hardcode * it here */ switch (fw_file->wmi_op_version) { case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: case ATH10K_FW_WMI_OP_VERSION_10_4: set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map); break; default: break; } return 0; } #define TGT_IRAM_READ_PER_ITR (8 * 1024) static int ath10k_core_copy_target_iram(struct ath10k *ar) { const struct ath10k_hw_mem_layout *hw_mem; const struct ath10k_mem_region *tmp, *mem_region = NULL; dma_addr_t paddr; void *vaddr = NULL; u8 num_read_itr; int i, ret; u32 len, remaining_len; /* the copy target iram feature must also work when * ATH10K_FW_CRASH_DUMP_RAM_DATA is disabled, so * we use _ath10k_coredump_get_mem_layout() to accomplish that */ hw_mem = _ath10k_coredump_get_mem_layout(ar); if (!hw_mem) /* if CONFIG_DEV_COREDUMP is disabled we get NULL, then * just silently disable the feature by doing nothing */ return 0; for (i = 0; i < hw_mem->region_table.size; i++) { tmp = &hw_mem->region_table.regions[i]; if (tmp->type == ATH10K_MEM_REGION_TYPE_REG) { mem_region = tmp; break; } } if (!mem_region) return -ENOMEM; for (i = 0; i < ar->wmi.num_mem_chunks; i++) { if (ar->wmi.mem_chunks[i].req_id == WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID) { vaddr = ar->wmi.mem_chunks[i].vaddr; len = ar->wmi.mem_chunks[i].len; break; } } if (!vaddr || !len) { ath10k_warn(ar, "No allocated memory for IRAM back up"); return -ENOMEM; } len = (len < mem_region->len) ? len : mem_region->len; paddr = mem_region->start; num_read_itr = len / TGT_IRAM_READ_PER_ITR; remaining_len = len % TGT_IRAM_READ_PER_ITR; for (i = 0; i < num_read_itr; i++) { ret = ath10k_hif_diag_read(ar, paddr, vaddr, TGT_IRAM_READ_PER_ITR); if (ret) { ath10k_warn(ar, "failed to copy firmware IRAM contents: %d", ret); return ret; } paddr += TGT_IRAM_READ_PER_ITR; vaddr += TGT_IRAM_READ_PER_ITR; } if (remaining_len) { ret = ath10k_hif_diag_read(ar, paddr, vaddr, remaining_len); if (ret) { ath10k_warn(ar, "failed to copy firmware IRAM contents: %d", ret); return ret; } } ath10k_dbg(ar, ATH10K_DBG_BOOT, "target IRAM back up completed\n"); return 0; } int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, const struct ath10k_fw_components *fw) { int status; u32 val; lockdep_assert_held(&ar->conf_mutex); clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); ar->running_fw = fw; if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->running_fw->fw_file.fw_features)) { ath10k_bmi_start(ar); /* Enable hardware clock to speed up firmware download */ if (ar->hw_params.hw_ops->enable_pll_clk) { status = ar->hw_params.hw_ops->enable_pll_clk(ar); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot enable pll ret %d\n", status); } if (ath10k_init_configure_target(ar)) { status = -EINVAL; goto err; } status = ath10k_download_cal_data(ar); if (status) goto err; /* Some qca988x solutions have a global reset issue * during target initialization. Bypassing the PLL setting before * downloading the firmware and letting the SoC run on REF_CLK * fixes the problem. A corresponding firmware change is also * needed to set the clock source once the target is * initialized.
*/ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT, ar->running_fw->fw_file.fw_features)) { status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1); if (status) { ath10k_err(ar, "could not write to skip_clock_init: %d\n", status); goto err; } } status = ath10k_download_fw(ar); if (status) goto err; status = ath10k_init_uart(ar); if (status) goto err; if (ar->hif.bus == ATH10K_BUS_SDIO) { status = ath10k_init_sdio(ar, mode); if (status) { ath10k_err(ar, "failed to init SDIO: %d\n", status); goto err; } } } ar->htc.htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; status = ath10k_htc_init(ar); if (status) { ath10k_err(ar, "could not init HTC (%d)\n", status); goto err; } if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->running_fw->fw_file.fw_features)) { status = ath10k_bmi_done(ar); if (status) goto err; } status = ath10k_wmi_attach(ar); if (status) { ath10k_err(ar, "WMI attach failed: %d\n", status); goto err; } status = ath10k_htt_init(ar); if (status) { ath10k_err(ar, "failed to init htt: %d\n", status); goto err_wmi_detach; } status = ath10k_htt_tx_start(&ar->htt); if (status) { ath10k_err(ar, "failed to alloc htt tx: %d\n", status); goto err_wmi_detach; } /* If firmware indicates Full Rx Reorder support it must be used in a * slightly different manner. Let HTT code know. */ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)); status = ath10k_htt_rx_alloc(&ar->htt); if (status) { ath10k_err(ar, "failed to alloc htt rx: %d\n", status); goto err_htt_tx_detach; } status = ath10k_hif_start(ar); if (status) { ath10k_err(ar, "could not start HIF: %d\n", status); goto err_htt_rx_detach; } status = ath10k_htc_wait_target(&ar->htc); if (status) { ath10k_err(ar, "failed to connect to HTC: %d\n", status); goto err_hif_stop; } status = ath10k_hif_start_post(ar); if (status) { ath10k_err(ar, "failed to swap mailbox: %d\n", status); goto err_hif_stop; } if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_htt_connect(&ar->htt); if (status) { ath10k_err(ar, "failed to connect htt (%d)\n", status); goto err_hif_stop; } } status = ath10k_wmi_connect(ar); if (status) { ath10k_err(ar, "could not connect wmi: %d\n", status); goto err_hif_stop; } status = ath10k_htc_start(&ar->htc); if (status) { ath10k_err(ar, "failed to start htc: %d\n", status); goto err_hif_stop; } if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_wmi_wait_for_service_ready(ar); if (status) { ath10k_warn(ar, "wmi service ready event not received"); goto err_hif_stop; } } ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); if (test_bit(ATH10K_FW_FEATURE_IRAM_RECOVERY, ar->running_fw->fw_file.fw_features)) { status = ath10k_core_copy_target_iram(ar); if (status) { ath10k_warn(ar, "failed to copy target iram contents: %d", status); goto err_hif_stop; } } if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) && mode == ATH10K_FIRMWARE_MODE_NORMAL) { val = 0; if (ath10k_peer_stats_enabled(ar)) val = WMI_10_4_PEER_STATS; /* Enable vdev stats by default */ val |= WMI_10_4_VDEV_STATS; if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) val |= WMI_10_4_BSS_CHANNEL_INFO_64; ath10k_core_fetch_btcoex_dt(ar); /* 10.4 firmware supports BT-Coex without reloading firmware * via pdev param. To support Bluetooth coexistence pdev param, * WMI_COEX_GPIO_SUPPORT of extended resource config should be * enabled always. 
* * We can still enable BTCOEX if firmware has the support * even though btcoex_support value is * ATH10K_DT_BTCOEX_NOT_FOUND */ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, ar->running_fw->fw_file.fw_features) && ar->coex_support) val |= WMI_10_4_COEX_GPIO_SUPPORT; if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY; if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA; if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) val |= WMI_10_4_TX_DATA_ACK_RSSI; if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) val |= WMI_10_4_REPORT_AIRTIME; if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) val |= WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT; status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, "failed to send ext resource cfg command : %d\n", status); goto err_hif_stop; } } status = ath10k_wmi_cmd_init(ar); if (status) { ath10k_err(ar, "could not send WMI init command (%d)\n", status); goto err_hif_stop; } status = ath10k_wmi_wait_for_unified_ready(ar); if (status) { ath10k_err(ar, "wmi unified ready event not received\n"); goto err_hif_stop; } status = ath10k_core_compat_services(ar); if (status) { ath10k_err(ar, "compat services failed: %d\n", status); goto err_hif_stop; } status = ath10k_wmi_pdev_set_base_macaddr(ar, ar->mac_addr); if (status && status != -EOPNOTSUPP) { ath10k_err(ar, "failed to set base mac address: %d\n", status); goto err_hif_stop; } /* Some firmware revisions do not properly set up hardware rx filter * registers. * * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK * is filled with 0s instead of 1s allowing HW to respond with ACKs to * any frames that match MAC_PCU_RX_FILTER which is also * misconfigured to accept anything. * * The ADDR1 is programmed using an internal firmware structure field and * can't be (easily/sanely) reached from the driver explicitly. It is * possible to implicitly make it correct by creating a dummy vdev and * then deleting it.
*/ if (ar->hw_params.hw_filter_reset_required && mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_core_reset_rx_filter(ar); if (status) { ath10k_err(ar, "failed to reset rx filter: %d\n", status); goto err_hif_stop; } } status = ath10k_htt_rx_ring_refill(ar); if (status) { ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); goto err_hif_stop; } if (ar->max_num_vdevs >= 64) ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL; else ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1; INIT_LIST_HEAD(&ar->arvifs); /* we don't care about HTT in UTF mode */ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_htt_setup(&ar->htt); if (status) { ath10k_err(ar, "failed to setup htt: %d\n", status); goto err_hif_stop; } } status = ath10k_debug_start(ar); if (status) goto err_hif_stop; status = ath10k_hif_set_target_log_mode(ar, fw_diag_log); if (status && status != -EOPNOTSUPP) { ath10k_warn(ar, "set target log mode failed: %d\n", status); goto err_hif_stop; } status = ath10k_leds_start(ar); if (status) goto err_hif_stop; return 0; err_hif_stop: ath10k_hif_stop(ar); err_htt_rx_detach: ath10k_htt_rx_free(&ar->htt); err_htt_tx_detach: ath10k_htt_tx_free(&ar->htt); err_wmi_detach: ath10k_wmi_detach(ar); err: return status; } EXPORT_SYMBOL(ath10k_core_start); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) { int ret; unsigned long time_left; reinit_completion(&ar->target_suspend); ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt); if (ret) { ath10k_warn(ar, "could not suspend target (%d)\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); if (!time_left) { ath10k_warn(ar, "suspend timed out - target pause event never came\n"); return -ETIMEDOUT; } return 0; } void ath10k_core_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); ath10k_debug_stop(ar); /* try to suspend target */ if (ar->state != ATH10K_STATE_RESTARTING && ar->state != ATH10K_STATE_UTF) ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); ath10k_hif_stop(ar); ath10k_htt_tx_stop(&ar->htt); ath10k_htt_rx_free(&ar->htt); ath10k_wmi_detach(ar); ar->id.bmi_ids_valid = false; } EXPORT_SYMBOL(ath10k_core_stop); /* mac80211 manages fw/hw initialization through start/stop hooks. 
However in * order to know what hw capabilities should be advertised to mac80211 it is * necessary to load the firmware (and tear it down immediately since start * hook will try to init it again) before registering */ static int ath10k_core_probe_fw(struct ath10k *ar) { struct bmi_target_info target_info; int ret = 0; ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); if (ret) { ath10k_err(ar, "could not power on hif bus (%d)\n", ret); return ret; } switch (ar->hif.bus) { case ATH10K_BUS_SDIO: memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info_sdio(ar, &target_info); if (ret) { ath10k_err(ar, "could not get target info (%d)\n", ret); goto err_power_down; } ar->target_version = target_info.version; ar->hw->wiphy->hw_version = target_info.version; break; case ATH10K_BUS_PCI: case ATH10K_BUS_AHB: case ATH10K_BUS_USB: memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info(ar, &target_info); if (ret) { ath10k_err(ar, "could not get target info (%d)\n", ret); goto err_power_down; } ar->target_version = target_info.version; ar->hw->wiphy->hw_version = target_info.version; break; case ATH10K_BUS_SNOC: memset(&target_info, 0, sizeof(target_info)); ret = ath10k_hif_get_target_info(ar, &target_info); if (ret) { ath10k_err(ar, "could not get target info (%d)\n", ret); goto err_power_down; } ar->target_version = target_info.version; ar->hw->wiphy->hw_version = target_info.version; break; default: ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus); } ret = ath10k_init_hw_params(ar); if (ret) { ath10k_err(ar, "could not get hw params (%d)\n", ret); goto err_power_down; } ret = ath10k_core_fetch_firmware_files(ar); if (ret) { ath10k_err(ar, "could not fetch firmware files (%d)\n", ret); goto err_power_down; } BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) != sizeof(ar->normal_mode_fw.fw_file.fw_version)); memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version, sizeof(ar->hw->wiphy->fw_version)); ath10k_debug_print_hwfw_info(ar); if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_core_pre_cal_download(ar); if (ret) { /* pre calibration data download is not necessary * for all the chipsets. Ignore failures and continue. 
*/ ath10k_dbg(ar, ATH10K_DBG_BOOT, "could not load pre cal data: %d\n", ret); } ret = ath10k_core_get_board_id_from_otp(ar); if (ret && ret != -EOPNOTSUPP) { ath10k_err(ar, "failed to get board id from otp: %d\n", ret); goto err_free_firmware_files; } ret = ath10k_core_check_smbios(ar); if (ret) ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n"); ret = ath10k_core_check_dt(ar); if (ret) ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n"); ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD); if (ret) { ath10k_err(ar, "failed to fetch board file: %d\n", ret); goto err_free_firmware_files; } ath10k_debug_print_board_info(ar); } device_get_mac_address(ar->dev, ar->mac_addr); ret = ath10k_core_init_firmware_features(ar); if (ret) { ath10k_err(ar, "fatal problem with firmware features: %d\n", ret); goto err_free_firmware_files; } if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file); if (ret) { ath10k_err(ar, "failed to initialize code swap segment: %d\n", ret); goto err_free_firmware_files; } } mutex_lock(&ar->conf_mutex); ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, &ar->normal_mode_fw); if (ret) { ath10k_err(ar, "could not init core (%d)\n", ret); goto err_unlock; } ath10k_debug_print_boot_info(ar); ath10k_core_stop(ar); mutex_unlock(&ar->conf_mutex); ath10k_hif_power_down(ar); return 0; err_unlock: mutex_unlock(&ar->conf_mutex); err_free_firmware_files: ath10k_core_free_firmware_files(ar); err_power_down: ath10k_hif_power_down(ar); return ret; } static void ath10k_core_register_work(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, register_work); int status; /* peer stats are enabled by default */ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); status = ath10k_core_probe_fw(ar); if (status) { ath10k_err(ar, "could not probe fw (%d)\n", status); goto err; } status = ath10k_mac_register(ar); if (status) { ath10k_err(ar, "could not register to mac80211 (%d)\n", status); goto err_release_fw; } status = ath10k_coredump_register(ar); if (status) { ath10k_err(ar, "unable to register coredump\n"); goto err_unregister_mac; } status = ath10k_debug_register(ar); if (status) { ath10k_err(ar, "unable to initialize debugfs\n"); goto err_unregister_coredump; } status = ath10k_spectral_create(ar); if (status) { ath10k_err(ar, "failed to initialize spectral\n"); goto err_debug_destroy; } status = ath10k_thermal_register(ar); if (status) { ath10k_err(ar, "could not register thermal device: %d\n", status); goto err_spectral_destroy; } status = ath10k_leds_register(ar); if (status) { ath10k_err(ar, "could not register leds: %d\n", status); goto err_thermal_unregister; } set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); return; err_thermal_unregister: ath10k_thermal_unregister(ar); err_spectral_destroy: ath10k_spectral_destroy(ar); err_debug_destroy: ath10k_debug_destroy(ar); err_unregister_coredump: ath10k_coredump_unregister(ar); err_unregister_mac: ath10k_mac_unregister(ar); err_release_fw: ath10k_core_free_firmware_files(ar); err: /* TODO: It's probably a good idea to release device from the driver * but calling device_release_driver() here will cause a deadlock. 
*/ return; } int ath10k_core_register(struct ath10k *ar, const struct ath10k_bus_params *bus_params) { ar->bus_param = *bus_params; queue_work(ar->workqueue, &ar->register_work); return 0; } EXPORT_SYMBOL(ath10k_core_register); void ath10k_core_unregister(struct ath10k *ar) { cancel_work_sync(&ar->register_work); if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) return; ath10k_leds_unregister(ar); ath10k_thermal_unregister(ar); /* Stop spectral before unregistering from mac80211 to remove the * relayfs debugfs file cleanly. Otherwise the parent debugfs tree * would already have been freed recursively, leading to a double free. */ ath10k_spectral_destroy(ar); /* We must unregister from mac80211 before we stop HTC and HIF. * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. */ ath10k_mac_unregister(ar); ath10k_testmode_destroy(ar); ath10k_core_free_firmware_files(ar); ath10k_core_free_board_files(ar); ath10k_debug_unregister(ar); } EXPORT_SYMBOL(ath10k_core_unregister); struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_bus bus, enum ath10k_hw_rev hw_rev, const struct ath10k_hif_ops *hif_ops) { struct ath10k *ar; int ret; ar = ath10k_mac_create(priv_size); if (!ar) return NULL; ar->ath_common.priv = ar; ar->ath_common.hw = ar->hw; ar->dev = dev; ar->hw_rev = hw_rev; ar->hif.ops = hif_ops; ar->hif.bus = bus; switch (hw_rev) { case ATH10K_HW_QCA988X: case ATH10K_HW_QCA9887: ar->regs = &qca988x_regs; ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca988x_values; break; case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: ar->regs = &qca6174_regs; ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca6174_values; break; case ATH10K_HW_QCA99X0: case ATH10K_HW_QCA9984: ar->regs = &qca99x0_regs; ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca99x0_values; break; case ATH10K_HW_QCA9888: ar->regs = &qca99x0_regs; ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca9888_values; break; case ATH10K_HW_QCA4019: ar->regs = &qca4019_regs; ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca4019_values; break; case ATH10K_HW_WCN3990: ar->regs = &wcn3990_regs; ar->hw_ce_regs = &wcn3990_ce_regs; ar->hw_values = &wcn3990_values; break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", hw_rev); ret = -EOPNOTSUPP; goto err_free_mac; } init_completion(&ar->scan.started); init_completion(&ar->scan.completed); init_completion(&ar->scan.on_channel); init_completion(&ar->target_suspend); init_completion(&ar->driver_recovery); init_completion(&ar->wow.wakeup_completed); init_completion(&ar->install_key_done); init_completion(&ar->vdev_setup_done); init_completion(&ar->vdev_delete_done); init_completion(&ar->thermal.wmi_sync); init_completion(&ar->bss_survey_done); init_completion(&ar->peer_delete_done); init_completion(&ar->peer_stats_info_complete); INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); ar->workqueue = create_singlethread_workqueue("ath10k_wq"); if (!ar->workqueue) goto err_free_mac; ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq"); if (!ar->workqueue_aux) goto err_free_wq; ar->workqueue_tx_complete = create_singlethread_workqueue("ath10k_tx_complete_wq"); if (!ar->workqueue_tx_complete) goto err_free_aux_wq; mutex_init(&ar->conf_mutex); mutex_init(&ar->dump_mutex); spin_lock_init(&ar->data_lock); for (int ac = 0; ac < IEEE80211_NUM_ACS; ac++) spin_lock_init(&ar->queue_lock[ac]); INIT_LIST_HEAD(&ar->peers); init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq); init_waitqueue_head(&ar->wmi.tx_credits_wq); skb_queue_head_init(&ar->htt.rx_indication_head); init_completion(&ar->offchan_tx_completed); INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); skb_queue_head_init(&ar->offchan_tx_queue); INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work); skb_queue_head_init(&ar->wmi_mgmt_tx_queue); INIT_WORK(&ar->register_work, ath10k_core_register_work); INIT_WORK(&ar->restart_work, ath10k_core_restart); INIT_WORK(&ar->set_coverage_class_work, ath10k_core_set_coverage_class_work); ar->napi_dev = alloc_netdev_dummy(0); if (!ar->napi_dev) goto err_free_tx_complete; ret = ath10k_coredump_create(ar); if (ret) goto err_free_netdev; ret = ath10k_debug_create(ar); if (ret) goto err_free_coredump; return ar; err_free_coredump: ath10k_coredump_destroy(ar); err_free_netdev: free_netdev(ar->napi_dev); err_free_tx_complete: destroy_workqueue(ar->workqueue_tx_complete); err_free_aux_wq: destroy_workqueue(ar->workqueue_aux); err_free_wq: destroy_workqueue(ar->workqueue); err_free_mac: ath10k_mac_destroy(ar); return NULL; } EXPORT_SYMBOL(ath10k_core_create); void ath10k_core_destroy(struct ath10k *ar) { destroy_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue_aux); destroy_workqueue(ar->workqueue_tx_complete); free_netdev(ar->napi_dev); ath10k_debug_destroy(ar); ath10k_coredump_destroy(ar); ath10k_htt_tx_destroy(&ar->htt); ath10k_wmi_free_host_mem(ar); ath10k_mac_destroy(ar); } EXPORT_SYMBOL(ath10k_core_destroy); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL");
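/*
 * Editorial sketch (not part of the original core.c): a minimal outline of
 * how a bus glue driver would typically use the ath10k_core_* API exported
 * above. The example_* names, the chosen bus/hw revision values and the
 * empty hif_ops/bus_params are placeholders for illustration only; real
 * glue code (e.g. the PCI or SDIO layer) fills in far more detail.
 */
#if 0	/* illustrative only, never compiled */
struct example_bus_priv {
	int dummy;	/* bus specific state would live here */
};

static const struct ath10k_hif_ops example_hif_ops = {
	/* bus specific HIF callbacks go here */
};

static int example_bus_probe(struct device *dev)
{
	struct ath10k_bus_params bus_params = {};	/* fill in bus details */
	struct ath10k *ar;
	int ret;

	/* Allocate the core + mac80211 hw, bound to this bus and hw revision. */
	ar = ath10k_core_create(sizeof(struct example_bus_priv), dev,
				ATH10K_BUS_PCI, ATH10K_HW_QCA988X,
				&example_hif_ops);
	if (!ar)
		return -ENOMEM;

	dev_set_drvdata(dev, ar);

	/* Registration completes asynchronously in ar->register_work,
	 * see ath10k_core_register_work() above.
	 */
	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_core_destroy(ar);
		return ret;
	}

	return 0;
}

static void example_bus_remove(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);

	/* Tear down in reverse order: unregister first, then free the core. */
	ath10k_core_unregister(ar);
	ath10k_core_destroy(ar);
}
#endif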
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> <http://rt2x00.serialmonkey.com> */ /* Module: rt2500usb Abstract: rt2500usb device specific routines. Supported chipsets: RT2570. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" #include "rt2x00usb.h" #include "rt2500usb.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* * Register access. * All access to the CSR registers will go through the methods * rt2500usb_register_read and rt2500usb_register_write. * BBP and RF registers require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try at most REGISTER_USB_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. * If the csr_mutex is already held then the _lock variants must * be used instead.
*/ static u16 rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset) { __le16 reg; rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, &reg, sizeof(reg)); return le16_to_cpu(reg); } static u16 rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset) { __le16 reg; rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); return le16_to_cpu(reg); } static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, &reg, sizeof(reg)); } static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); } static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u16 length) { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, value, length); } static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, struct rt2x00_field16 field, u16 *reg) { unsigned int i; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { *reg = rt2500usb_register_read_lock(rt2x00dev, offset); if (!rt2x00_get_field16(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; } #define WAIT_FOR_BBP(__dev, __reg) \ rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg)) static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_DATA, value); rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u16 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffff * which means we return 0xff to the caller.
*/ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); if (WAIT_FOR_BBP(rt2x00dev, &reg)) reg = rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7); } value = rt2x00_get_field16(reg, PHY_CSR7_DATA); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg); reg = 0; rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16); rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20); rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0); rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static u32 _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset) { return rt2500usb_register_read(rt2x00dev, offset); } static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 value) { rt2500usb_register_write(rt2x00dev, offset, value); } static const struct rt2x00debug rt2500usb_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = _rt2500usb_register_read, .write = _rt2500usb_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u16), .word_count = CSR_REG_SIZE / sizeof(u16), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt2500usb_bbp_read, .write = rt2500usb_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2500usb_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u16 reg; reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19); return rt2x00_get_field16(reg, MAC_CSR19_VAL7); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2500usb_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; u16 reg; reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR20); if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled); else if (led->type == LED_TYPE_ACTIVITY) rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled); rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg); } static int rt2500usb_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u16 reg; reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR21); rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on); rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off); rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg); return 0; } static void rt2500usb_init_led(struct rt2x00_dev
*rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt2500usb_brightness_set; led->led_dev.blink_set = rt2500usb_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ /* * rt2500usb does not differentiate between shared and pairwise * keys, so we should use the same function for both key types. */ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { u32 mask; u16 reg; enum cipher curr_cipher; if (crypto->cmd == SET_KEY) { /* * Disallow setting a WEP key with an index other than 0; * this is known not to work on at least some hardware. * SW crypto will be used in that case. */ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) && key->keyidx != 0) return -EOPNOTSUPP; /* * Pairwise key will always be entry 0, but this * could collide with a shared key on the same * position... */ mask = TXRX_CSR0_KEY_ID.bit_mask; reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0); curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM); reg &= mask; if (reg && reg == mask) return -ENOSPC; reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); key->hw_key_idx += reg ? ffz(reg) : 0; /* * Hardware requires that all keys use the same cipher * (e.g. TKIP-only, AES-only, but not TKIP+AES). * If this is not the first key, compare the cipher with the * first one and fall back to SW crypto if not the same. */ if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher) return -EOPNOTSUPP; rt2500usb_register_multiwrite(rt2x00dev, KEY_ENTRY(key->hw_key_idx), crypto->key, sizeof(crypto->key)); /* * The driver does not support the IV/EIV generation * in hardware. However it demands the data to be provided * both separately as well as inside the frame. * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib * to ensure rt2x00lib will not strip the data from the * frame after the copy, now we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; } /* * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate * a particular key is valid. */ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); if (crypto->cmd == SET_KEY) mask |= 1 << key->hw_key_idx; else if (crypto->cmd == DISABLE_KEY) mask &= ~(1 << key->hw_key_idx); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, mask); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); return 0; } static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u16 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. 
*/ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); } static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { unsigned int bcn_preload; u16 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable beacon config */ bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR20); rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 2 * (conf->type != NL80211_IFTYPE_STATION)); rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); /* * Enable synchronisation. */ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18); rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } if (flags & CONFIG_UPDATE_MAC) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, (3 * sizeof(__le16))); if (flags & CONFIG_UPDATE_BSSID) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid, (3 * sizeof(__le16))); } static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u16 reg; if (changed & BSS_CHANGED_ERP_PREAMBLE) { reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR10); rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18); rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time); rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs); rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs); } } static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r2; u8 r14; u16 csr5; u16 csr6; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitly. */ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); r2 = rt2500usb_bbp_read(rt2x00dev, 2); r14 = rt2500usb_bbp_read(rt2x00dev, 14); csr5 = rt2500usb_register_read(rt2x00dev, PHY_CSR5); csr6 = rt2500usb_register_read(rt2x00dev, PHY_CSR6); /* * Configure the TX antenna. 
*/ switch (ant->tx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); break; case ANTENNA_A: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); break; } /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); break; case ANTENNA_A: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); break; } /* * RT2525E and RT5222 need to flip TX I/Q */ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); /* * RT2525E does not need RX I/Q Flip. */ if (rt2x00_rf(rt2x00dev, RF2525E)) rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); } else { rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); } rt2500usb_bbp_write(rt2x00dev, 2, r2); rt2500usb_bbp_write(rt2x00dev, 14, r14); rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); } static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { /* * Set TXpower. */ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); /* * For RT2525E we should first set the channel to half band higher. */ if (rt2x00_rf(rt2x00dev, RF2525E)) { static const u32 vals[] = { 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, 0x00000902, 0x00000906 }; rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { u32 rf3; rf3 = rt2x00_rf_read(rt2x00dev, 3); rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2500usb_rf_write(rt2x00dev, 3, rf3); } static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? 
STATE_SLEEP : STATE_AWAKE; u16 reg; if (state == STATE_SLEEP) { reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, rt2x00dev->beacon_int - 20); rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); /* We must first disable autowake before it can be enabled */ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } else { reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); } static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2500usb_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt2500usb_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_PS) rt2500usb_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 reg; /* * Update FCS error count from register. */ reg = rt2500usb_register_read(rt2x00dev, STA_CSR0); qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. */ reg = rt2500usb_register_read(rt2x00dev, STA_CSR3); qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); } static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 eeprom; u16 value; eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW); rt2500usb_bbp_write(rt2x00dev, 24, value); eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW); rt2500usb_bbp_write(rt2x00dev, 25, value); eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW); rt2500usb_bbp_write(rt2x00dev, 61, value); eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); rt2500usb_bbp_write(rt2x00dev, 17, value); qual->vgc_level = value; } /* * Queue handlers. 
*/ static void rt2500usb_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u16 reg; switch (queue->qid) { case QID_RX: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); break; case QID_BEACON: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); break; default: break; } } static void rt2500usb_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u16 reg; switch (queue->qid) { case QID_RX: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); break; case QID_BEACON: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); break; default: break; } } /* * Initialization functions. */ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) { u16 reg; rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001, USB_MODE_TEST, REGISTER_TIMEOUT); rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308, 0x00f0, REGISTER_TIMEOUT); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111); rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR5); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0, 13); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1, 12); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR6); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0, 10); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1, 11); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR7); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0, 7); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1, 6); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR8); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0, 5); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1, 0); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, 
TXRX_CSR19_TSF_COUNT, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) { reg = rt2500usb_register_read(rt2x00dev, PHY_CSR2); rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0); } else { reg = 0; rt2x00_set_field16(&reg, PHY_CSR2_LNA, 1); rt2x00_set_field16(&reg, PHY_CSR2_LNA_MODE, 3); } rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg); rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002); rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053); rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee); rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR8); rt2x00_set_field16(&reg, MAC_CSR8_MAX_FRAME_UNIT, rt2x00dev->rx->data_size); rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 90); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); reg = rt2500usb_register_read(rt2x00dev, PHY_CSR4); rt2x00_set_field16(&reg, PHY_CSR4_LOW_RF_LE, 1); rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR1); rt2x00_set_field16(&reg, TXRX_CSR1_AUTO_SEQUENCE, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); return 0; } static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { value = rt2500usb_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 value; u8 reg_id; if (unlikely(rt2500usb_wait_bbp_ready(rt2x00dev))) return -EACCES; rt2500usb_bbp_write(rt2x00dev, 3, 0x02); rt2500usb_bbp_write(rt2x00dev, 4, 0x19); rt2500usb_bbp_write(rt2x00dev, 14, 0x1c); rt2500usb_bbp_write(rt2x00dev, 15, 0x30); rt2500usb_bbp_write(rt2x00dev, 16, 0xac); rt2500usb_bbp_write(rt2x00dev, 18, 0x18); rt2500usb_bbp_write(rt2x00dev, 19, 0xff); rt2500usb_bbp_write(rt2x00dev, 20, 0x1e); rt2500usb_bbp_write(rt2x00dev, 21, 0x08); rt2500usb_bbp_write(rt2x00dev, 22, 0x08); rt2500usb_bbp_write(rt2x00dev, 23, 0x08); rt2500usb_bbp_write(rt2x00dev, 24, 0x80); rt2500usb_bbp_write(rt2x00dev, 25, 0x50); rt2500usb_bbp_write(rt2x00dev, 26, 0x08); rt2500usb_bbp_write(rt2x00dev, 27, 0x23); rt2500usb_bbp_write(rt2x00dev, 30, 0x10); rt2500usb_bbp_write(rt2x00dev, 31, 0x2b); rt2500usb_bbp_write(rt2x00dev, 32, 0xb9); rt2500usb_bbp_write(rt2x00dev, 34, 0x12); rt2500usb_bbp_write(rt2x00dev, 35, 0x50); rt2500usb_bbp_write(rt2x00dev, 39, 0xc4); rt2500usb_bbp_write(rt2x00dev, 40, 
0x02); rt2500usb_bbp_write(rt2x00dev, 41, 0x60); rt2500usb_bbp_write(rt2x00dev, 53, 0x10); rt2500usb_bbp_write(rt2x00dev, 54, 0x18); rt2500usb_bbp_write(rt2x00dev, 56, 0x08); rt2500usb_bbp_write(rt2x00dev, 57, 0x10); rt2500usb_bbp_write(rt2x00dev, 58, 0x08); rt2500usb_bbp_write(rt2x00dev, 61, 0x60); rt2500usb_bbp_write(rt2x00dev, 62, 0x10); rt2500usb_bbp_write(rt2x00dev, 75, 0xff); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt2500usb_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. */ if (unlikely(rt2500usb_init_registers(rt2x00dev) || rt2500usb_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); /* * Disable synchronisation. */ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); rt2x00usb_disable_radio(rt2x00dev); } static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u16 reg; u16 reg2; unsigned int i; bool put_to_sleep; u8 bbp_state; u8 rf_state; put_to_sleep = (state != STATE_AWAKE); reg = 0; rt2x00_set_field16(®, MAC_CSR17_BBP_DESIRE_STATE, state); rt2x00_set_field16(®, MAC_CSR17_RF_DESIRE_STATE, state); rt2x00_set_field16(®, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep); rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); rt2x00_set_field16(®, MAC_CSR17_SET_STATE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. */ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { reg2 = rt2500usb_register_read(rt2x00dev, MAC_CSR17); bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); msleep(30); } return -EBUSY; } static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2500usb_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt2500usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2500usb_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt2500usb_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *txd = (__le32 *) entry->skb->data; u32 word; /* * Start writing the descriptor words. 
*/ word = rt2x00_desc_read(txd, 0); rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); rt2x00_desc_write(txd, 0, word); word = rt2x00_desc_read(txd, 1); rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); rt2x00_desc_write(txd, 1, word); word = rt2x00_desc_read(txd, 2); rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); } /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * TX data initialization */ static void rt2500usb_beacondone(struct urb *urb); static void rt2500usb_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint); int length; u16 reg, reg0; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); /* * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); memset(entry->skb->data, 0, TXD_DESC_SIZE); /* * Write the TX descriptor for the beacon. */ rt2500usb_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * USB devices cannot blindly pass the skb->len as the * length of the data to usb_fill_bulk_urb. Pass the skb * to the driver to determine what the length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe, entry->skb->data, length, rt2500usb_beacondone, entry); /* * Second we need to create the guardian byte. * We only need a single byte, so lets recycle * the 'flags' field we are not using for beacons. */ bcn_priv->guardian_data = 0; usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe, &bcn_priv->guardian_data, 1, rt2500usb_beacondone, entry); /* * Send out the guardian byte. 
*/ usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC); /* * Enable beaconing again. */ rt2x00_set_field16(®, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(®, TXRX_CSR19_TBCN, 1); reg0 = reg; rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 1); /* * Beacon generation will fail initially. * To prevent this we need to change the TXRX_CSR19 * register several times (reg0 is the same as reg * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0 * and 1 in reg). */ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } static int rt2500usb_get_tx_data_len(struct queue_entry *entry) { int length; /* * The length _must_ be a multiple of 2, * but it must _not_ be a multiple of the USB packet size. */ length = roundup(entry->skb->len, 2); length += (2 * !(length % entry->queue->usb_maxpacket)); return length; } /* * RX control handlers */ static void rt2500usb_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *rxd = (__le32 *)(entry->skb->data + (entry_priv->urb->actual_length - entry->queue->desc_size)); u32 word0; u32 word1; /* * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of * frame data in rt2x00usb. */ memcpy(skbdesc->desc, rxd, skbdesc->desc_len); rxd = (__le32 *)skbdesc->desc; /* * It is now safe to read the descriptor on all architectures. */ word0 = rt2x00_desc_read(rxd, 0); word1 = rt2x00_desc_read(rxd, 1); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER); if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR)) rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY; if (rxdesc->cipher != CIPHER_NONE) { rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2); rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3); rxdesc->dev_flags |= RXDONE_CRYPTO_IV; /* ICV is located at the end of frame */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); rxdesc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset; rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; /* * Adjust the skb memory window to the frame boundaries. */ skb_trim(entry->skb, rxdesc->size); } /* * Interrupt functions. 
*/ static void rt2500usb_beacondone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) return; /* * Check if this was the guardian beacon, * if that was the case we need to send the real beacon now. * Otherwise we should free the sk_buffer, the device * should be doing the rest of the work now. */ if (bcn_priv->guardian_urb == urb) { usb_submit_urb(bcn_priv->urb, GFP_ATOMIC); } else if (bcn_priv->urb == urb) { dev_kfree_skb(entry->skb); entry->skb = NULL; } } /* * Device probe functions. */ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) { u16 word; u8 *mac; u8 bbp; rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); /* * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, LED_MODE_DEFAULT); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, DEFAULT_RSSI_OFFSET); rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune: 0x%04x\n", word); } /* * Switch lower vgc bound to current BBP R17 value, * lower the value a bit for better quality. 
*/ bbp = rt2500usb_bbp_read(rt2x00dev, 17); bbp -= 6; word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); } else { rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r17: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r24: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r25: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r61: 0x%04x\n", word); } return 0; } static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) { u16 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. */ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR0); rt2x00_set_chip(rt2x00dev, RT2570, value, reg); if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) { rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n"); return -ENODEV; } if (!rt2x00_rf(rt2x00dev, RF2522) && !rt2x00_rf(rt2x00dev, RF2523) && !rt2x00_rf(rt2x00dev, RF2524) && !rt2x00_rf(rt2x00dev, RF2525) && !rt2x00_rf(rt2x00dev, RF2525E) && !rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead. * I am not 100% sure about this, but the legacy drivers do not * indicate antenna swapping in software is required when * diversity is enabled. */ if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* * Store led mode, for correct led behaviour. 
*/ #ifdef CONFIG_RT2X00_LIB_LEDS value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); if (value == LED_MODE_TXRX_ACTIVITY || value == LED_MODE_DEFAULT || value == LED_MODE_ASUS) rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_ACTIVITY); #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Detect if this device has an hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Read the RSSI <-> dBm offset information. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET); rt2x00dev->rssi_offset = rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); return 0; } /* * RF value list for RF2522 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2522[] = { { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, }; /* * RF value list for RF2523 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2523[] = { { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, }; /* * RF value list for RF2524 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2524[] = { { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, }; /* * RF value list for RF2525 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525[] = { { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, 
{ 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, }; /* * RF value list for RF2525e * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525e[] = { { 1, 0x00022010, 0x0000089a, 0x00060111, 0x00000e1b }, { 2, 0x00022010, 0x0000089e, 0x00060111, 0x00000e07 }, { 3, 0x00022010, 0x0000089e, 0x00060111, 0x00000e1b }, { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, }; /* * RF value list for RF5222 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5222[] = { { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, /* 802.11 HyperLan 2 */ { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, /* 802.11 UNII */ { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, { 149, 0x00022020, 
0x000090a6, 0x00000101, 0x00000a07 }, { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, }; static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; unsigned int i; /* * Initialize all hw fields. * * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are * capable of sending the buffered frames out after the DTIM * transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * infinitly and thus dropping it after some time. */ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); ieee80211_hw_set(rt2x00dev->hw, RX_INCLUDES_FCS); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); /* * Disable powersaving as default. */ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. */ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (rt2x00_rf(rt2x00dev, RF2522)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); spec->channels = rf_vals_bg_2522; } else if (rt2x00_rf(rt2x00dev, RF2523)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); spec->channels = rf_vals_bg_2523; } else if (rt2x00_rf(rt2x00dev, RF2524)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); spec->channels = rf_vals_bg_2524; } else if (rt2x00_rf(rt2x00dev, RF2525)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); spec->channels = rf_vals_bg_2525; } else if (rt2x00_rf(rt2x00dev, RF2525E)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); spec->channels = rf_vals_bg_2525e; } else if (rt2x00_rf(rt2x00dev, RF5222)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5222); spec->channels = rf_vals_5222; } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = DEFAULT_TXPOWER; } } return 0; } static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u16 reg; /* * Allocate eeprom data. */ retval = rt2500usb_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt2500usb_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19); rt2x00_set_field16(®, MAC_CSR19_DIR0, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); /* * Initialize hw specifications. 
*/ retval = rt2500usb_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device requires the atim queue */ __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags); if (!modparam_nohwcrypt) { __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(REQUIRE_COPY_IV, &rt2x00dev->cap_flags); } __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags); __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } static const struct ieee80211_ops rt2500usb_mac80211_ops = { .add_chanctx = ieee80211_emulate_add_chanctx, .remove_chanctx = ieee80211_emulate_remove_chanctx, .change_chanctx = ieee80211_emulate_change_chanctx, .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2x00mac_conf_tx, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { .probe_hw = rt2500usb_probe_hw, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, .set_device_state = rt2500usb_set_device_state, .rfkill_poll = rt2500usb_rfkill_poll, .link_stats = rt2500usb_link_stats, .reset_tuner = rt2500usb_reset_tuner, .watchdog = rt2x00usb_watchdog, .start_queue = rt2500usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt2500usb_stop_queue, .flush_queue = rt2x00usb_flush_queue, .write_tx_desc = rt2500usb_write_tx_desc, .write_beacon = rt2500usb_write_beacon, .get_tx_data_len = rt2500usb_get_tx_data_len, .fill_rxdone = rt2500usb_fill_rxdone, .config_shared_key = rt2500usb_config_key, .config_pairwise_key = rt2500usb_config_key, .config_filter = rt2500usb_config_filter, .config_intf = rt2500usb_config_intf, .config_erp = rt2500usb_config_erp, .config_ant = rt2500usb_config_ant, .config = rt2500usb_config, }; static void rt2500usb_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_BEACON: queue->limit = 1; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn); break; case QID_ATIM: queue->limit = 8; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; default: BUG(); break; } } static const struct rt2x00_ops rt2500usb_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 1, .eeprom_size = EEPROM_SIZE, .rf_size = 
RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2500usb_queue_init, .lib = &rt2500usb_rt2x00_ops, .hw = &rt2500usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2500usb_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * rt2500usb module information. */ static const struct usb_device_id rt2500usb_device_table[] = { /* ASUS */ { USB_DEVICE(0x0b05, 0x1706) }, { USB_DEVICE(0x0b05, 0x1707) }, /* Belkin */ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */ { USB_DEVICE(0x050d, 0x7051) }, /* Cisco Systems */ { USB_DEVICE(0x13b1, 0x000d) }, { USB_DEVICE(0x13b1, 0x0011) }, { USB_DEVICE(0x13b1, 0x001a) }, /* Conceptronic */ { USB_DEVICE(0x14b2, 0x3c02) }, /* D-LINK */ { USB_DEVICE(0x2001, 0x3c00) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x8001) }, { USB_DEVICE(0x1044, 0x8007) }, /* Hercules */ { USB_DEVICE(0x06f8, 0xe000) }, /* Melco */ { USB_DEVICE(0x0411, 0x005e) }, { USB_DEVICE(0x0411, 0x0066) }, { USB_DEVICE(0x0411, 0x0067) }, { USB_DEVICE(0x0411, 0x008b) }, { USB_DEVICE(0x0411, 0x0097) }, /* MSI */ { USB_DEVICE(0x0db0, 0x6861) }, { USB_DEVICE(0x0db0, 0x6865) }, { USB_DEVICE(0x0db0, 0x6869) }, /* Ralink */ { USB_DEVICE(0x148f, 0x1706) }, { USB_DEVICE(0x148f, 0x2570) }, { USB_DEVICE(0x148f, 0x9020) }, /* Sagem */ { USB_DEVICE(0x079b, 0x004b) }, /* Siemens */ { USB_DEVICE(0x0681, 0x3c06) }, /* SMC */ { USB_DEVICE(0x0707, 0xee13) }, /* Spairon */ { USB_DEVICE(0x114b, 0x0110) }, /* SURECOM */ { USB_DEVICE(0x0769, 0x11f3) }, /* Trust */ { USB_DEVICE(0x0eb0, 0x9020) }, /* VTech */ { USB_DEVICE(0x0f88, 0x3012) }, /* Zinwell */ { USB_DEVICE(0x5a57, 0x0260) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); MODULE_LICENSE("GPL"); static int rt2500usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { return rt2x00usb_probe(usb_intf, &rt2500usb_ops); } static struct usb_driver rt2500usb_driver = { .name = KBUILD_MODNAME, .id_table = rt2500usb_device_table, .probe = rt2500usb_probe, .disconnect = rt2x00usb_disconnect, .suspend = rt2x00usb_suspend, .resume = rt2x00usb_resume, .reset_resume = rt2x00usb_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rt2500usb_driver); |
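/*
 * Illustrative sketch (not part of rt2500usb.c): the driver above touches
 * every CSR word through the rt2x00lib helpers rt2x00_set_field16() and
 * rt2x00_get_field16().  The snippet below shows the generic mask/shift
 * pattern such helpers follow.  The struct layout and names are assumptions
 * made for illustration only (only a .bit_mask member is visible above, in
 * rt2500usb_config_key()); this is plain user-space C, not copied from
 * rt2x00reg.h.
 */
#include <stdint.h>

struct field16_sketch {
	uint16_t bit_offset;	/* lowest bit position of the subfield */
	uint16_t bit_mask;	/* contiguous mask covering the subfield */
};

/* Write 'value' into the subfield of *reg described by 'field'. */
static inline void set_field16_sketch(uint16_t *reg,
				      struct field16_sketch field,
				      uint16_t value)
{
	*reg = (*reg & ~field.bit_mask) |
	       ((value << field.bit_offset) & field.bit_mask);
}

/* Read back the subfield of 'reg' described by 'field'. */
static inline uint16_t get_field16_sketch(uint16_t reg,
					  struct field16_sketch field)
{
	return (reg & field.bit_mask) >> field.bit_offset;
}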
// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86-optimized CRC32 functions
 *
 * Copyright (C) 2008 Intel Corporation
 * Copyright 2012 Xyratex Technology Limited
 * Copyright 2024 Google LLC
 */

#include <linux/crc32.h>
#include <linux/module.h>
#include "crc-pclmul-template.h"

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);

DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32);

u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
	CRC_PCLMUL(crc, p, len, crc32_lsb, crc32_lsb_0xedb88320_consts,
		   have_pclmulqdq);
	return crc32_le_base(crc, p, len);
}
EXPORT_SYMBOL(crc32_le_arch);

#ifdef CONFIG_X86_64
#define CRC32_INST "crc32q %1, %q0"
#else
#define CRC32_INST "crc32l %1, %0"
#endif

/*
 * Use carryless multiply version of crc32c when buffer size is >= 512 to
 * account for FPU state save/restore overhead.
 */
#define CRC32C_PCLMUL_BREAKEVEN	512

asmlinkage u32 crc32c_x86_3way(u32 crc, const u8 *buffer, size_t len);

u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
	size_t num_longs;

	if (!static_branch_likely(&have_crc32))
		return crc32c_base(crc, p, len);

	if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
	    static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
		kernel_fpu_begin();
		crc = crc32c_x86_3way(crc, p, len);
		kernel_fpu_end();
		return crc;
	}

	for (num_longs = len / sizeof(unsigned long);
	     num_longs != 0; num_longs--, p += sizeof(unsigned long))
		asm(CRC32_INST : "+r" (crc) : ASM_INPUT_RM (*(unsigned long *)p));

	if (sizeof(unsigned long) > 4 && (len & 4)) {
		asm("crc32l %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u32 *)p));
		p += 4;
	}
	if (len & 2) {
		asm("crc32w %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u16 *)p));
		p += 2;
	}
	if (len & 1)
		asm("crc32b %1, %0" : "+r" (crc) : ASM_INPUT_RM (*p));

	return crc;
}
EXPORT_SYMBOL(crc32c_arch);

u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
	return crc32_be_base(crc, p, len);
}
EXPORT_SYMBOL(crc32_be_arch);

static int __init crc32_x86_init(void)
{
	if (boot_cpu_has(X86_FEATURE_XMM4_2))
		static_branch_enable(&have_crc32);
	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
		static_branch_enable(&have_pclmulqdq);
		INIT_CRC_PCLMUL(crc32_lsb);
	}
	return 0;
}
subsys_initcall(crc32_x86_init);

static void __exit crc32_x86_exit(void)
{
}
module_exit(crc32_x86_exit);

u32 crc32_optimizations(void)
{
	u32 optimizations = 0;

	if (static_key_enabled(&have_crc32))
		optimizations |= CRC32C_OPTIMIZATION;
	if (static_key_enabled(&have_pclmulqdq))
		optimizations |= CRC32_LE_OPTIMIZATION;
	return optimizations;
}
EXPORT_SYMBOL(crc32_optimizations);

MODULE_DESCRIPTION("x86-optimized CRC32 functions");
MODULE_LICENSE("GPL");
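/*
 * Illustrative sketch (not taken from the kernel sources): the crc32q/crc32l/
 * crc32w/crc32b instructions used in crc32c_arch() above compute CRC-32C
 * (Castagnoli), i.e. the bit-reflected polynomial 0x82F63B78 with no implicit
 * pre/post inversion.  A minimal user-space, bit-at-a-time version of the
 * same update (the slow equivalent of the generic crc32c_base() fallback)
 * could look like this.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32c_bitwise_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;	/* fold the next byte into the low bits */
		for (int i = 0; i < 8; i++)
			/* shift right, XORing in the reflected polynomial
			 * whenever the dropped bit was 1 */
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
	}
	return crc;
}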
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM timer #if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_TIMER_H #include <linux/tracepoint.h> #include <linux/hrtimer.h> #include <linux/timer.h> DECLARE_EVENT_CLASS(timer_class, TP_PROTO(struct timer_list *timer), TP_ARGS(timer), TP_STRUCT__entry( __field( void *, timer ) ), TP_fast_assign( __entry->timer = timer; ), TP_printk("timer=%p", __entry->timer) ); /** * timer_init - called when the timer is initialized * @timer: pointer to struct timer_list */ DEFINE_EVENT(timer_class, timer_init, TP_PROTO(struct timer_list *timer), TP_ARGS(timer) ); #define decode_timer_flags(flags) \ __print_flags(flags, "|", \ { TIMER_MIGRATING, "M" }, \ { TIMER_DEFERRABLE, "D" }, \ { TIMER_PINNED, "P" }, \ { TIMER_IRQSAFE, "I" }) /** * timer_start - called when the timer is started * @timer: pointer to struct timer_list * @bucket_expiry: the bucket expiry time */ TRACE_EVENT(timer_start, TP_PROTO(struct timer_list *timer, unsigned long bucket_expiry), TP_ARGS(timer, bucket_expiry), TP_STRUCT__entry( __field( void *, timer ) __field( void *, function ) __field( unsigned long, expires ) __field( unsigned long, bucket_expiry ) __field( unsigned long, now ) __field( unsigned int, flags ) ), TP_fast_assign( __entry->timer = timer; __entry->function = timer->function; __entry->expires = timer->expires; __entry->bucket_expiry = bucket_expiry; __entry->now = jiffies; __entry->flags = timer->flags; ), TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now, __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK, __entry->flags >> TIMER_ARRAYSHIFT, decode_timer_flags(__entry->flags & 
TIMER_TRACE_FLAGMASK)) ); /** * timer_expire_entry - called immediately before the timer callback * @timer: pointer to struct timer_list * @baseclk: value of timer_base::clk when timer expires * * Allows to determine the timer latency. */ TRACE_EVENT(timer_expire_entry, TP_PROTO(struct timer_list *timer, unsigned long baseclk), TP_ARGS(timer, baseclk), TP_STRUCT__entry( __field( void *, timer ) __field( unsigned long, now ) __field( void *, function) __field( unsigned long, baseclk ) ), TP_fast_assign( __entry->timer = timer; __entry->now = jiffies; __entry->function = timer->function; __entry->baseclk = baseclk; ), TP_printk("timer=%p function=%ps now=%lu baseclk=%lu", __entry->timer, __entry->function, __entry->now, __entry->baseclk) ); /** * timer_expire_exit - called immediately after the timer callback returns * @timer: pointer to struct timer_list * * When used in combination with the timer_expire_entry tracepoint we can * determine the runtime of the timer callback function. * * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might * be invalid. We solely track the pointer. */ DEFINE_EVENT(timer_class, timer_expire_exit, TP_PROTO(struct timer_list *timer), TP_ARGS(timer) ); /** * timer_cancel - called when the timer is canceled * @timer: pointer to struct timer_list */ DEFINE_EVENT(timer_class, timer_cancel, TP_PROTO(struct timer_list *timer), TP_ARGS(timer) ); TRACE_EVENT(timer_base_idle, TP_PROTO(bool is_idle, unsigned int cpu), TP_ARGS(is_idle, cpu), TP_STRUCT__entry( __field( bool, is_idle ) __field( unsigned int, cpu ) ), TP_fast_assign( __entry->is_idle = is_idle; __entry->cpu = cpu; ), TP_printk("is_idle=%d cpu=%d", __entry->is_idle, __entry->cpu) ); #define decode_clockid(type) \ __print_symbolic(type, \ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \ { CLOCK_TAI, "CLOCK_TAI" }) #define decode_hrtimer_mode(mode) \ __print_symbolic(mode, \ { HRTIMER_MODE_ABS, "ABS" }, \ { HRTIMER_MODE_REL, "REL" }, \ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" }, \ { HRTIMER_MODE_ABS_HARD, "ABS|HARD" }, \ { HRTIMER_MODE_REL_HARD, "REL|HARD" }, \ { HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" }, \ { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" }) /** * hrtimer_setup - called when the hrtimer is initialized * @hrtimer: pointer to struct hrtimer * @clockid: the hrtimers clock * @mode: the hrtimers mode */ TRACE_EVENT(hrtimer_setup, TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, enum hrtimer_mode mode), TP_ARGS(hrtimer, clockid, mode), TP_STRUCT__entry( __field( void *, hrtimer ) __field( clockid_t, clockid ) __field( enum hrtimer_mode, mode ) ), TP_fast_assign( __entry->hrtimer = hrtimer; __entry->clockid = clockid; __entry->mode = mode; ), TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, decode_clockid(__entry->clockid), decode_hrtimer_mode(__entry->mode)) ); /** * hrtimer_start - called when the hrtimer is started * @hrtimer: pointer to struct hrtimer * @mode: the hrtimers mode */ TRACE_EVENT(hrtimer_start, TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode), TP_ARGS(hrtimer, mode), TP_STRUCT__entry( __field( void *, hrtimer ) __field( void *, function ) __field( s64, expires ) __field( s64, softexpires ) __field( enum hrtimer_mode, mode ) ), 
TP_fast_assign( __entry->hrtimer = hrtimer; __entry->function = ACCESS_PRIVATE(hrtimer, function); __entry->expires = hrtimer_get_expires(hrtimer); __entry->softexpires = hrtimer_get_softexpires(hrtimer); __entry->mode = mode; ), TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu " "mode=%s", __entry->hrtimer, __entry->function, (unsigned long long) __entry->expires, (unsigned long long) __entry->softexpires, decode_hrtimer_mode(__entry->mode)) ); /** * hrtimer_expire_entry - called immediately before the hrtimer callback * @hrtimer: pointer to struct hrtimer * @now: pointer to variable which contains current time of the * timers base. * * Allows to determine the timer latency. */ TRACE_EVENT(hrtimer_expire_entry, TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), TP_ARGS(hrtimer, now), TP_STRUCT__entry( __field( void *, hrtimer ) __field( s64, now ) __field( void *, function) ), TP_fast_assign( __entry->hrtimer = hrtimer; __entry->now = *now; __entry->function = ACCESS_PRIVATE(hrtimer, function); ), TP_printk("hrtimer=%p function=%ps now=%llu", __entry->hrtimer, __entry->function, (unsigned long long) __entry->now) ); DECLARE_EVENT_CLASS(hrtimer_class, TP_PROTO(struct hrtimer *hrtimer), TP_ARGS(hrtimer), TP_STRUCT__entry( __field( void *, hrtimer ) ), TP_fast_assign( __entry->hrtimer = hrtimer; ), TP_printk("hrtimer=%p", __entry->hrtimer) ); /** * hrtimer_expire_exit - called immediately after the hrtimer callback returns * @hrtimer: pointer to struct hrtimer * * When used in combination with the hrtimer_expire_entry tracepoint we can * determine the runtime of the callback function. */ DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit, TP_PROTO(struct hrtimer *hrtimer), TP_ARGS(hrtimer) ); /** * hrtimer_cancel - called when the hrtimer is canceled * @hrtimer: pointer to struct hrtimer */ DEFINE_EVENT(hrtimer_class, hrtimer_cancel, TP_PROTO(struct hrtimer *hrtimer), TP_ARGS(hrtimer) ); /** * itimer_state - called when itimer is started or canceled * @which: name of the interval timer * @value: the itimers value, itimer is canceled if value->it_value is * zero, otherwise it is started * @expires: the itimers expiry time */ TRACE_EVENT(itimer_state, TP_PROTO(int which, const struct itimerspec64 *const value, unsigned long long expires), TP_ARGS(which, value, expires), TP_STRUCT__entry( __field( int, which ) __field( unsigned long long, expires ) __field( long, value_sec ) __field( long, value_nsec ) __field( long, interval_sec ) __field( long, interval_nsec ) ), TP_fast_assign( __entry->which = which; __entry->expires = expires; __entry->value_sec = value->it_value.tv_sec; __entry->value_nsec = value->it_value.tv_nsec; __entry->interval_sec = value->it_interval.tv_sec; __entry->interval_nsec = value->it_interval.tv_nsec; ), TP_printk("which=%d expires=%llu it_value=%ld.%06ld it_interval=%ld.%06ld", __entry->which, __entry->expires, __entry->value_sec, __entry->value_nsec / NSEC_PER_USEC, __entry->interval_sec, __entry->interval_nsec / NSEC_PER_USEC) ); /** * itimer_expire - called when itimer expires * @which: type of the interval timer * @pid: pid of the process which owns the timer * @now: current time, used to calculate the latency of itimer */ TRACE_EVENT(itimer_expire, TP_PROTO(int which, struct pid *pid, unsigned long long now), TP_ARGS(which, pid, now), TP_STRUCT__entry( __field( int , which ) __field( pid_t, pid ) __field( unsigned long long, now ) ), TP_fast_assign( __entry->which = which; __entry->now = now; __entry->pid = pid_nr(pid); ), TP_printk("which=%d pid=%d 
now=%llu", __entry->which, (int) __entry->pid, __entry->now) ); #ifdef CONFIG_NO_HZ_COMMON #define TICK_DEP_NAMES \ tick_dep_mask_name(NONE) \ tick_dep_name(POSIX_TIMER) \ tick_dep_name(PERF_EVENTS) \ tick_dep_name(SCHED) \ tick_dep_name(CLOCK_UNSTABLE) \ tick_dep_name(RCU) \ tick_dep_name_end(RCU_EXP) #undef tick_dep_name #undef tick_dep_mask_name #undef tick_dep_name_end /* The MASK will convert to their bits and they need to be processed too */ #define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); #define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); /* NONE only has a mask defined for it */ #define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); TICK_DEP_NAMES #undef tick_dep_name #undef tick_dep_mask_name #undef tick_dep_name_end #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, #define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep } #define show_tick_dep_name(val) \ __print_symbolic(val, TICK_DEP_NAMES) TRACE_EVENT(tick_stop, TP_PROTO(int success, int dependency), TP_ARGS(success, dependency), TP_STRUCT__entry( __field( int , success ) __field( int , dependency ) ), TP_fast_assign( __entry->success = success; __entry->dependency = dependency; ), TP_printk("success=%d dependency=%s", __entry->success, \ show_tick_dep_name(__entry->dependency)) ); #endif #endif /* _TRACE_TIMER_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
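/*
 * Illustrative sketch (not part of the header above): a minimal, hypothetical
 * timer user.  The timer_init, timer_start, timer_expire_entry/exit and
 * timer_cancel events defined above are emitted by the timer core itself, so
 * setting up, arming and deleting a timer_list like this is enough to see
 * them once the "timer" trace events are enabled.  The module, callback and
 * timeout value are assumptions made for the example.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *t)
{
	/* Runs between timer_expire_entry and timer_expire_exit. */
	pr_info("demo timer fired at jiffies=%lu\n", jiffies);
}

static int __init demo_init(void)
{
	timer_setup(&demo_timer, demo_timer_fn, 0);	/* -> timer_init */
	mod_timer(&demo_timer, jiffies + HZ);		/* -> timer_start */
	return 0;
}

static void __exit demo_exit(void)
{
	timer_delete_sync(&demo_timer);	/* -> timer_cancel (if still pending) */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("timer tracepoint demo (illustrative only)");
MODULE_LICENSE("GPL");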
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_SWAB_H #define _UAPI_LINUX_SWAB_H #include <linux/types.h> #include <linux/stddef.h> #include <asm/bitsperlong.h> #include <asm/swab.h> /* * casts are necessary for constants, because we never know how for sure * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. */ #define ___constant_swab16(x) ((__u16)( \ (((__u16)(x) & (__u16)0x00ffU) << 8) | \ (((__u16)(x) & (__u16)0xff00U) >> 8))) #define ___constant_swab32(x) ((__u32)( \ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ (((__u32)(x) & (__u32)0xff000000UL) >> 24))) #define ___constant_swab64(x) ((__u64)( \ (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) #define ___constant_swahw32(x) ((__u32)( \ (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) #define ___constant_swahb32(x) ((__u32)( \ (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) /* * Implement the following as inlines, but define the interface using * macros to allow constant folding when possible: * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 */ static inline __attribute_const__ __u16 __fswab16(__u16 val) { #if defined (__arch_swab16) return __arch_swab16(val); #else return ___constant_swab16(val); #endif } static inline __attribute_const__ __u32 __fswab32(__u32 val) { #if defined(__arch_swab32) return __arch_swab32(val); #else return ___constant_swab32(val); #endif } static inline __attribute_const__ __u64 __fswab64(__u64 val) { #if defined (__arch_swab64) return __arch_swab64(val); #elif defined(__SWAB_64_THRU_32__) __u32 h = val >> 32; __u32 l = val & ((1ULL << 32) - 1); return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); #else return ___constant_swab64(val); #endif } static inline __attribute_const__ __u32 __fswahw32(__u32 val) { #ifdef __arch_swahw32 return __arch_swahw32(val); #else return 
___constant_swahw32(val); #endif } static inline __attribute_const__ __u32 __fswahb32(__u32 val) { #ifdef __arch_swahb32 return __arch_swahb32(val); #else return ___constant_swahb32(val); #endif } /** * __swab16 - return a byteswapped 16-bit value * @x: value to byteswap */ #ifdef __HAVE_BUILTIN_BSWAP16__ #define __swab16(x) (__u16)__builtin_bswap16((__u16)(x)) #else #define __swab16(x) \ (__u16)(__builtin_constant_p(x) ? \ ___constant_swab16(x) : \ __fswab16(x)) #endif /** * __swab32 - return a byteswapped 32-bit value * @x: value to byteswap */ #ifdef __HAVE_BUILTIN_BSWAP32__ #define __swab32(x) (__u32)__builtin_bswap32((__u32)(x)) #else #define __swab32(x) \ (__u32)(__builtin_constant_p(x) ? \ ___constant_swab32(x) : \ __fswab32(x)) #endif /** * __swab64 - return a byteswapped 64-bit value * @x: value to byteswap */ #ifdef __HAVE_BUILTIN_BSWAP64__ #define __swab64(x) (__u64)__builtin_bswap64((__u64)(x)) #else #define __swab64(x) \ (__u64)(__builtin_constant_p(x) ? \ ___constant_swab64(x) : \ __fswab64(x)) #endif static __always_inline unsigned long __swab(const unsigned long y) { #if __BITS_PER_LONG == 64 return __swab64(y); #else /* __BITS_PER_LONG == 32 */ return __swab32(y); #endif } /** * __swahw32 - return a word-swapped 32-bit value * @x: value to wordswap * * __swahw32(0x12340000) is 0x00001234 */ #define __swahw32(x) \ (__builtin_constant_p((__u32)(x)) ? \ ___constant_swahw32(x) : \ __fswahw32(x)) /** * __swahb32 - return a high and low byte-swapped 32-bit value * @x: value to byteswap * * __swahb32(0x12345678) is 0x34127856 */ #define __swahb32(x) \ (__builtin_constant_p((__u32)(x)) ? \ ___constant_swahb32(x) : \ __fswahb32(x)) /** * __swab16p - return a byteswapped 16-bit value from a pointer * @p: pointer to a naturally-aligned 16-bit value */ static __always_inline __u16 __swab16p(const __u16 *p) { #ifdef __arch_swab16p return __arch_swab16p(p); #else return __swab16(*p); #endif } /** * __swab32p - return a byteswapped 32-bit value from a pointer * @p: pointer to a naturally-aligned 32-bit value */ static __always_inline __u32 __swab32p(const __u32 *p) { #ifdef __arch_swab32p return __arch_swab32p(p); #else return __swab32(*p); #endif } /** * __swab64p - return a byteswapped 64-bit value from a pointer * @p: pointer to a naturally-aligned 64-bit value */ static __always_inline __u64 __swab64p(const __u64 *p) { #ifdef __arch_swab64p return __arch_swab64p(p); #else return __swab64(*p); #endif } /** * __swahw32p - return a wordswapped 32-bit value from a pointer * @p: pointer to a naturally-aligned 32-bit value * * See __swahw32() for details of wordswapping. */ static inline __u32 __swahw32p(const __u32 *p) { #ifdef __arch_swahw32p return __arch_swahw32p(p); #else return __swahw32(*p); #endif } /** * __swahb32p - return a high and low byteswapped 32-bit value from a pointer * @p: pointer to a naturally-aligned 32-bit value * * See __swahb32() for details of high/low byteswapping. 
*/ static inline __u32 __swahb32p(const __u32 *p) { #ifdef __arch_swahb32p return __arch_swahb32p(p); #else return __swahb32(*p); #endif } /** * __swab16s - byteswap a 16-bit value in-place * @p: pointer to a naturally-aligned 16-bit value */ static inline void __swab16s(__u16 *p) { #ifdef __arch_swab16s __arch_swab16s(p); #else *p = __swab16p(p); #endif } /** * __swab32s - byteswap a 32-bit value in-place * @p: pointer to a naturally-aligned 32-bit value */ static __always_inline void __swab32s(__u32 *p) { #ifdef __arch_swab32s __arch_swab32s(p); #else *p = __swab32p(p); #endif } /** * __swab64s - byteswap a 64-bit value in-place * @p: pointer to a naturally-aligned 64-bit value */ static __always_inline void __swab64s(__u64 *p) { #ifdef __arch_swab64s __arch_swab64s(p); #else *p = __swab64p(p); #endif } /** * __swahw32s - wordswap a 32-bit value in-place * @p: pointer to a naturally-aligned 32-bit value * * See __swahw32() for details of wordswapping */ static inline void __swahw32s(__u32 *p) { #ifdef __arch_swahw32s __arch_swahw32s(p); #else *p = __swahw32p(p); #endif } /** * __swahb32s - high and low byteswap a 32-bit value in-place * @p: pointer to a naturally-aligned 32-bit value * * See __swahb32() for details of high and low byte swapping */ static inline void __swahb32s(__u32 *p) { #ifdef __arch_swahb32s __arch_swahb32s(p); #else *p = __swahb32p(p); #endif } #endif /* _UAPI_LINUX_SWAB_H */ |
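/*
 * Illustrative sketch (not part of the header above): since this is a UAPI
 * header, the byteswap helpers can also be exercised from userspace.  The
 * expected results follow the examples given in the kernel-doc comments
 * above; the test program itself is an assumption for illustration.
 */
#include <linux/swab.h>
#include <stdio.h>

int main(void)
{
	printf("__swab16(0x1234)      = 0x%04x\n", __swab16(0x1234));      /* 0x3412 */
	printf("__swab32(0x12345678)  = 0x%08x\n", __swab32(0x12345678));  /* 0x78563412 */
	printf("__swahw32(0x12340000) = 0x%08x\n", __swahw32(0x12340000)); /* 0x00001234 */
	printf("__swahb32(0x12345678) = 0x%08x\n", __swahb32(0x12345678)); /* 0x34127856 */
	return 0;
}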
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * <linux/usb/audio.h> -- USB Audio definitions.
 *
 * Copyright (C) 2006 Thumtronics Pty Ltd.
 * Developed for Thumtronics by Grey Innovation
 * Ben Williamson <ben.williamson@greyinnovation.com>
 *
 * This software is distributed under the terms of the GNU General Public
 * License ("GPL") version 2, as published by the Free Software Foundation.
 *
 * This file holds USB constants and structures defined
 * by the USB Device Class Definition for Audio Devices.
 * Comments below reference relevant sections of that document:
 *
 * http://www.usb.org/developers/devclass_docs/audio10.pdf
 *
 * Types and defines in this file are either specific to version 1.0 of
 * this standard or common for newer versions.
*/ #ifndef _UAPI__LINUX_USB_AUDIO_H #define _UAPI__LINUX_USB_AUDIO_H #include <linux/types.h> /* bInterfaceProtocol values to denote the version of the standard used */ #define UAC_VERSION_1 0x00 #define UAC_VERSION_2 0x20 #define UAC_VERSION_3 0x30 /* A.2 Audio Interface Subclass Codes */ #define USB_SUBCLASS_AUDIOCONTROL 0x01 #define USB_SUBCLASS_AUDIOSTREAMING 0x02 #define USB_SUBCLASS_MIDISTREAMING 0x03 /* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */ #define UAC_HEADER 0x01 #define UAC_INPUT_TERMINAL 0x02 #define UAC_OUTPUT_TERMINAL 0x03 #define UAC_MIXER_UNIT 0x04 #define UAC_SELECTOR_UNIT 0x05 #define UAC_FEATURE_UNIT 0x06 #define UAC1_PROCESSING_UNIT 0x07 #define UAC1_EXTENSION_UNIT 0x08 /* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ #define UAC_AS_GENERAL 0x01 #define UAC_FORMAT_TYPE 0x02 #define UAC_FORMAT_SPECIFIC 0x03 /* A.7 Processing Unit Process Types */ #define UAC_PROCESS_UNDEFINED 0x00 #define UAC_PROCESS_UP_DOWNMIX 0x01 #define UAC_PROCESS_DOLBY_PROLOGIC 0x02 #define UAC_PROCESS_STEREO_EXTENDER 0x03 #define UAC_PROCESS_REVERB 0x04 #define UAC_PROCESS_CHORUS 0x05 #define UAC_PROCESS_DYN_RANGE_COMP 0x06 /* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */ #define UAC_EP_GENERAL 0x01 /* A.9 Audio Class-Specific Request Codes */ #define UAC_SET_ 0x00 #define UAC_GET_ 0x80 #define UAC__CUR 0x1 #define UAC__MIN 0x2 #define UAC__MAX 0x3 #define UAC__RES 0x4 #define UAC__MEM 0x5 #define UAC_SET_CUR (UAC_SET_ | UAC__CUR) #define UAC_GET_CUR (UAC_GET_ | UAC__CUR) #define UAC_SET_MIN (UAC_SET_ | UAC__MIN) #define UAC_GET_MIN (UAC_GET_ | UAC__MIN) #define UAC_SET_MAX (UAC_SET_ | UAC__MAX) #define UAC_GET_MAX (UAC_GET_ | UAC__MAX) #define UAC_SET_RES (UAC_SET_ | UAC__RES) #define UAC_GET_RES (UAC_GET_ | UAC__RES) #define UAC_SET_MEM (UAC_SET_ | UAC__MEM) #define UAC_GET_MEM (UAC_GET_ | UAC__MEM) #define UAC_GET_STAT 0xff /* A.10 Control Selector Codes */ /* A.10.1 Terminal Control Selectors */ #define UAC_TERM_COPY_PROTECT 0x01 /* A.10.2 Feature Unit Control Selectors */ #define UAC_FU_MUTE 0x01 #define UAC_FU_VOLUME 0x02 #define UAC_FU_BASS 0x03 #define UAC_FU_MID 0x04 #define UAC_FU_TREBLE 0x05 #define UAC_FU_GRAPHIC_EQUALIZER 0x06 #define UAC_FU_AUTOMATIC_GAIN 0x07 #define UAC_FU_DELAY 0x08 #define UAC_FU_BASS_BOOST 0x09 #define UAC_FU_LOUDNESS 0x0a #define UAC_CONTROL_BIT(CS) (1 << ((CS) - 1)) /* A.10.3.1 Up/Down-mix Processing Unit Controls Selectors */ #define UAC_UD_ENABLE 0x01 #define UAC_UD_MODE_SELECT 0x02 /* A.10.3.2 Dolby Prologic (tm) Processing Unit Controls Selectors */ #define UAC_DP_ENABLE 0x01 #define UAC_DP_MODE_SELECT 0x02 /* A.10.3.3 3D Stereo Extender Processing Unit Control Selectors */ #define UAC_3D_ENABLE 0x01 #define UAC_3D_SPACE 0x02 /* A.10.3.4 Reverberation Processing Unit Control Selectors */ #define UAC_REVERB_ENABLE 0x01 #define UAC_REVERB_LEVEL 0x02 #define UAC_REVERB_TIME 0x03 #define UAC_REVERB_FEEDBACK 0x04 /* A.10.3.5 Chorus Processing Unit Control Selectors */ #define UAC_CHORUS_ENABLE 0x01 #define UAC_CHORUS_LEVEL 0x02 #define UAC_CHORUS_RATE 0x03 #define UAC_CHORUS_DEPTH 0x04 /* A.10.3.6 Dynamic Range Compressor Unit Control Selectors */ #define UAC_DCR_ENABLE 0x01 #define UAC_DCR_RATE 0x02 #define UAC_DCR_MAXAMPL 0x03 #define UAC_DCR_THRESHOLD 0x04 #define UAC_DCR_ATTACK_TIME 0x05 #define UAC_DCR_RELEASE_TIME 0x06 /* A.10.4 Extension Unit Control Selectors */ #define UAC_XU_ENABLE 0x01 /* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */ #define UAC_MS_HEADER 0x01 #define UAC_MIDI_IN_JACK 
0x02 #define UAC_MIDI_OUT_JACK 0x03 /* MIDI - A.1 MS Class-Specific Endpoint Descriptor Subtypes */ #define UAC_MS_GENERAL 0x01 /* Terminals - 2.1 USB Terminal Types */ #define UAC_TERMINAL_UNDEFINED 0x100 #define UAC_TERMINAL_STREAMING 0x101 #define UAC_TERMINAL_VENDOR_SPEC 0x1FF /* Terminal Control Selectors */ /* 4.3.2 Class-Specific AC Interface Descriptor */ struct uac1_ac_header_descriptor { __u8 bLength; /* 8 + n */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ __le16 bcdADC; /* 0x0100 */ __le16 wTotalLength; /* includes Unit and Terminal desc. */ __u8 bInCollection; /* n */ __u8 baInterfaceNr[]; /* [n] */ } __attribute__ ((packed)); #define UAC_DT_AC_HEADER_SIZE(n) (8 + (n)) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ struct uac1_ac_header_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __le16 bcdADC; \ __le16 wTotalLength; \ __u8 bInCollection; \ __u8 baInterfaceNr[n]; \ } __attribute__ ((packed)) /* 4.3.2.1 Input Terminal Descriptor */ struct uac_input_terminal_descriptor { __u8 bLength; /* in bytes: 12 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */ __u8 bTerminalID; /* Constant uniquely terminal ID */ __le16 wTerminalType; /* USB Audio Terminal Types */ __u8 bAssocTerminal; /* ID of the Output Terminal associated */ __u8 bNrChannels; /* Number of logical output channels */ __le16 wChannelConfig; __u8 iChannelNames; __u8 iTerminal; } __attribute__ ((packed)); #define UAC_DT_INPUT_TERMINAL_SIZE 12 /* Terminals - 2.2 Input Terminal Types */ #define UAC_INPUT_TERMINAL_UNDEFINED 0x200 #define UAC_INPUT_TERMINAL_MICROPHONE 0x201 #define UAC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 #define UAC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 #define UAC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 #define UAC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 #define UAC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 /* Terminals - control selectors */ #define UAC_TERMINAL_CS_COPY_PROTECT_CONTROL 0x01 /* 4.3.2.2 Output Terminal Descriptor */ struct uac1_output_terminal_descriptor { __u8 bLength; /* in bytes: 9 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ __u8 bTerminalID; /* Constant uniquely terminal ID */ __le16 wTerminalType; /* USB Audio Terminal Types */ __u8 bAssocTerminal; /* ID of the Input Terminal associated */ __u8 bSourceID; /* ID of the connected Unit or Terminal*/ __u8 iTerminal; } __attribute__ ((packed)); #define UAC_DT_OUTPUT_TERMINAL_SIZE 9 /* Terminals - 2.3 Output Terminal Types */ #define UAC_OUTPUT_TERMINAL_UNDEFINED 0x300 #define UAC_OUTPUT_TERMINAL_SPEAKER 0x301 #define UAC_OUTPUT_TERMINAL_HEADPHONES 0x302 #define UAC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 #define UAC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 #define UAC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 #define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 #define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 /* Terminals - 2.4 Bi-directional Terminal Types */ #define UAC_BIDIR_TERMINAL_UNDEFINED 0x400 #define UAC_BIDIR_TERMINAL_HANDSET 0x401 #define UAC_BIDIR_TERMINAL_HEADSET 0x402 #define UAC_BIDIR_TERMINAL_SPEAKER_PHONE 0x403 #define UAC_BIDIR_TERMINAL_ECHO_SUPPRESSING 0x404 #define UAC_BIDIR_TERMINAL_ECHO_CANCELING 0x405 /* Set bControlSize = 2 as default setting */ #define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 
1) * 2) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(ch) \ struct uac_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bUnitID; \ __u8 bSourceID; \ __u8 bControlSize; \ __le16 bmaControls[ch + 1]; \ __u8 iFeature; \ } __attribute__ ((packed)) /* 4.3.2.3 Mixer Unit Descriptor */ struct uac_mixer_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bNrInPins; __u8 baSourceID[]; } __attribute__ ((packed)); static inline __u8 uac_mixer_unit_bNrChannels(struct uac_mixer_unit_descriptor *desc) { return desc->baSourceID[desc->bNrInPins]; } static inline __u32 uac_mixer_unit_wChannelConfig(struct uac_mixer_unit_descriptor *desc, int protocol) { if (protocol == UAC_VERSION_1) return (desc->baSourceID[desc->bNrInPins + 2] << 8) | desc->baSourceID[desc->bNrInPins + 1]; else return (desc->baSourceID[desc->bNrInPins + 4] << 24) | (desc->baSourceID[desc->bNrInPins + 3] << 16) | (desc->baSourceID[desc->bNrInPins + 2] << 8) | (desc->baSourceID[desc->bNrInPins + 1]); } static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor *desc, int protocol) { return (protocol == UAC_VERSION_1) ? desc->baSourceID[desc->bNrInPins + 3] : desc->baSourceID[desc->bNrInPins + 5]; } static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return &desc->baSourceID[desc->bNrInPins + 4]; case UAC_VERSION_2: return &desc->baSourceID[desc->bNrInPins + 6]; case UAC_VERSION_3: return &desc->baSourceID[desc->bNrInPins + 2]; default: return NULL; } } static inline __u16 uac3_mixer_unit_wClusterDescrID(struct uac_mixer_unit_descriptor *desc) { return (desc->baSourceID[desc->bNrInPins + 1] << 8) | desc->baSourceID[desc->bNrInPins]; } static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; return raw[desc->bLength - 1]; } /* 4.3.2.4 Selector Unit Descriptor */ struct uac_selector_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUintID; __u8 bNrInPins; __u8 baSourceID[]; } __attribute__ ((packed)); static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; return raw[desc->bLength - 1]; } /* 4.3.2.5 Feature Unit Descriptor */ struct uac_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; __u8 bControlSize; __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; return raw[desc->bLength - 1]; } /* 4.3.2.6 Processing Unit Descriptors */ struct uac_processing_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __le16 wProcessType; __u8 bNrInPins; __u8 baSourceID[]; } __attribute__ ((packed)); static inline __u8 uac_processing_unit_bNrChannels(struct uac_processing_unit_descriptor *desc) { return desc->baSourceID[desc->bNrInPins]; } static inline __u32 uac_processing_unit_wChannelConfig(struct uac_processing_unit_descriptor *desc, int protocol) { if (protocol == UAC_VERSION_1) return (desc->baSourceID[desc->bNrInPins + 2] << 8) | desc->baSourceID[desc->bNrInPins + 1]; else return (desc->baSourceID[desc->bNrInPins + 4] << 24) | (desc->baSourceID[desc->bNrInPins + 3] << 16) | 
(desc->baSourceID[desc->bNrInPins + 2] << 8) | (desc->baSourceID[desc->bNrInPins + 1]); } static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_descriptor *desc, int protocol) { return (protocol == UAC_VERSION_1) ? desc->baSourceID[desc->bNrInPins + 3] : desc->baSourceID[desc->bNrInPins + 5]; } static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return desc->baSourceID[desc->bNrInPins + 4]; case UAC_VERSION_2: return 2; /* in UAC2, this value is constant */ case UAC_VERSION_3: return 4; /* in UAC3, this value is constant */ default: return 1; } } static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return &desc->baSourceID[desc->bNrInPins + 5]; case UAC_VERSION_2: return &desc->baSourceID[desc->bNrInPins + 6]; case UAC_VERSION_3: return &desc->baSourceID[desc->bNrInPins + 2]; default: return NULL; } } static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); switch (protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return *(uac_processing_unit_bmControls(desc, protocol) + control_size); case UAC_VERSION_3: return 0; /* UAC3 does not have this field */ } } static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); switch (protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return uac_processing_unit_bmControls(desc, protocol) + control_size + 1; case UAC_VERSION_3: return uac_processing_unit_bmControls(desc, protocol) + control_size; } } /* * Extension Unit (XU) has almost compatible layout with Processing Unit, but * on UAC2, it has a different bmControls size (bControlSize); it's 1 byte for * XU while 2 bytes for PU. The last iExtension field is a one-byte index as * well as iProcessing field of PU. 
*/ static inline __u8 uac_extension_unit_bControlSize(struct uac_processing_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return desc->baSourceID[desc->bNrInPins + 4]; case UAC_VERSION_2: return 1; /* in UAC2, this value is constant */ case UAC_VERSION_3: return 4; /* in UAC3, this value is constant */ default: return 1; } } static inline __u8 uac_extension_unit_iExtension(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_extension_unit_bControlSize(desc, protocol); switch (protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return *(uac_processing_unit_bmControls(desc, protocol) + control_size); case UAC_VERSION_3: return 0; /* UAC3 does not have this field */ } } /* 4.5.2 Class-Specific AS Interface Descriptor */ struct uac1_as_header_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* AS_GENERAL */ __u8 bTerminalLink; /* Terminal ID of connected Terminal */ __u8 bDelay; /* Delay introduced by the data path */ __le16 wFormatTag; /* The Audio Data Format */ } __attribute__ ((packed)); #define UAC_DT_AS_HEADER_SIZE 7 /* Formats - A.1.1 Audio Data Format Type I Codes */ #define UAC_FORMAT_TYPE_I_UNDEFINED 0x0 #define UAC_FORMAT_TYPE_I_PCM 0x1 #define UAC_FORMAT_TYPE_I_PCM8 0x2 #define UAC_FORMAT_TYPE_I_IEEE_FLOAT 0x3 #define UAC_FORMAT_TYPE_I_ALAW 0x4 #define UAC_FORMAT_TYPE_I_MULAW 0x5 struct uac_format_type_i_continuous_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bNrChannels; /* physical channels in the stream */ __u8 bSubframeSize; /* */ __u8 bBitResolution; __u8 bSamFreqType; __u8 tLowerSamFreq[3]; __u8 tUpperSamFreq[3]; } __attribute__ ((packed)); #define UAC_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 struct uac_format_type_i_discrete_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bNrChannels; /* physical channels in the stream */ __u8 bSubframeSize; /* */ __u8 bBitResolution; __u8 bSamFreqType; __u8 tSamFreq[][3]; } __attribute__ ((packed)); #define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \ struct uac_format_type_i_discrete_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bFormatType; \ __u8 bNrChannels; \ __u8 bSubframeSize; \ __u8 bBitResolution; \ __u8 bSamFreqType; \ __u8 tSamFreq[n][3]; \ } __attribute__ ((packed)) #define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) struct uac_format_type_i_ext_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __u8 bSubslotSize; __u8 bBitResolution; __u8 bHeaderLength; __u8 bControlSize; __u8 bSideBandProtocol; } __attribute__((packed)); /* Formats - Audio Data Format Type I Codes */ #define UAC_FORMAT_TYPE_II_MPEG 0x1001 #define UAC_FORMAT_TYPE_II_AC3 0x1002 struct uac_format_type_ii_discrete_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __le16 wMaxBitRate; __le16 wSamplesPerFrame; __u8 bSamFreqType; __u8 tSamFreq[][3]; } __attribute__((packed)); struct uac_format_type_ii_ext_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __le16 wMaxBitRate; __le16 wSamplesPerFrame; __u8 bHeaderLength; __u8 bSideBandProtocol; } __attribute__((packed)); /* 
type III */ #define UAC_FORMAT_TYPE_III_IEC1937_AC3 0x2001 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG1_LAYER1 0x2002 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_NOEXT 0x2003 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_EXT 0x2004 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_LAYER1_LS 0x2005 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_LAYER23_LS 0x2006 /* Formats - A.2 Format Type Codes */ #define UAC_FORMAT_TYPE_UNDEFINED 0x0 #define UAC_FORMAT_TYPE_I 0x1 #define UAC_FORMAT_TYPE_II 0x2 #define UAC_FORMAT_TYPE_III 0x3 #define UAC_EXT_FORMAT_TYPE_I 0x81 #define UAC_EXT_FORMAT_TYPE_II 0x82 #define UAC_EXT_FORMAT_TYPE_III 0x83 struct uac_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ __u8 bmAttributes; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); #define UAC_ISO_ENDPOINT_DESC_SIZE 7 #define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01 #define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02 #define UAC_EP_CS_ATTR_FILL_MAX 0x80 /* status word format (3.7.1.1) */ #define UAC1_STATUS_TYPE_ORIG_MASK 0x0f #define UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF 0x0 #define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_IF 0x1 #define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_EP 0x2 #define UAC1_STATUS_TYPE_IRQ_PENDING (1 << 7) #define UAC1_STATUS_TYPE_MEM_CHANGED (1 << 6) struct uac1_status_word { __u8 bStatusType; __u8 bOriginator; } __attribute__((packed)); #endif /* _UAPI__LINUX_USB_AUDIO_H */ |
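/*
 * Illustrative sketch (not part of the header above): how a gadget-style
 * driver might use DECLARE_UAC_AC_HEADER_DESCRIPTOR() from this file to
 * build a class-specific AC header for a single AudioStreaming interface.
 * The descriptor values, the interface number and USB_DT_CS_INTERFACE
 * (defined in the USB chapter-9 UAPI header) are assumptions for the
 * example, not required settings.
 */
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);

static struct uac1_ac_header_descriptor_1 demo_ac_header = {
	.bLength		= UAC_DT_AC_HEADER_SIZE(1),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_HEADER,
	.bcdADC			= cpu_to_le16(0x0100),	/* UAC 1.0 */
	.wTotalLength		= cpu_to_le16(0),	/* filled in at bind time */
	.bInCollection		= 1,			/* one streaming interface */
	.baInterfaceNr		= { 1 },		/* its interface number */
};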
#ifndef _LINUX_SCHED_ISOLATION_H
#define _LINUX_SCHED_ISOLATION_H

#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>

enum hk_type {
	HK_TYPE_DOMAIN,
	HK_TYPE_MANAGED_IRQ,
	HK_TYPE_KERNEL_NOISE,
	HK_TYPE_MAX,

	/*
	 * The following housekeeping types are only set by the nohz_full
	 * boot commandline option. So they can share the same value.
	 */
	HK_TYPE_TICK	= HK_TYPE_KERNEL_NOISE,
	HK_TYPE_TIMER	= HK_TYPE_KERNEL_NOISE,
	HK_TYPE_RCU	= HK_TYPE_KERNEL_NOISE,
	HK_TYPE_MISC	= HK_TYPE_KERNEL_NOISE,
	HK_TYPE_WQ	= HK_TYPE_KERNEL_NOISE,
	HK_TYPE_KTHREAD	= HK_TYPE_KERNEL_NOISE
};

#ifdef CONFIG_CPU_ISOLATION
DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);

extern int housekeeping_any_cpu(enum hk_type type);
extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
extern bool housekeeping_enabled(enum hk_type type);
extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
extern void __init housekeeping_init(void);

#else

static inline int housekeeping_any_cpu(enum hk_type type)
{
	return smp_processor_id();
}

static inline const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
	return cpu_possible_mask;
}

static inline bool housekeeping_enabled(enum hk_type type)
{
	return false;
}

static inline void housekeeping_affine(struct task_struct *t,
				       enum hk_type type)
{
}

static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
{
	return true;
}

static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */

static inline bool housekeeping_cpu(int cpu, enum hk_type type)
{
#ifdef CONFIG_CPU_ISOLATION
	if (static_branch_unlikely(&housekeeping_overridden))
		return housekeeping_test_cpu(cpu, type);
#endif
	return true;
}

static inline bool cpu_is_isolated(int cpu)
{
	return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
	       !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
	       cpuset_cpu_is_isolated(cpu);
}

#endif /* _LINUX_SCHED_ISOLATION_H */
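/*
 * Illustrative sketch (not part of the header above): a typical in-kernel
 * use of the housekeeping API, picking a CPU that is allowed to take
 * "kernel noise" work rather than an isolated one.  The helper function
 * and its caller are hypothetical.
 */
#include <linux/sched/isolation.h>
#include <linux/cpumask.h>

static int demo_pick_worker_cpu(void)
{
	const struct cpumask *hk = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
	int cpu;

	/* Prefer an online housekeeping CPU; fall back to any online CPU. */
	cpu = cpumask_any_and(hk, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);
	return cpu;
}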
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM signal

#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SIGNAL_H

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>

#define TP_STORE_SIGINFO(__entry, info)				\
	do {							\
		if (info == SEND_SIG_NOINFO) {			\
			__entry->errno = 0;			\
			__entry->code = SI_USER;		\
		} else if (info == SEND_SIG_PRIV) {		\
			__entry->errno = 0;			\
			__entry->code = SI_KERNEL;		\
		} else {					\
			__entry->errno = info->si_errno;	\
			__entry->code = info->si_code;		\
		}						\
	} while (0)

#ifndef TRACE_HEADER_MULTI_READ
enum {
	TRACE_SIGNAL_DELIVERED,
	TRACE_SIGNAL_IGNORED,
	TRACE_SIGNAL_ALREADY_PENDING,
	TRACE_SIGNAL_OVERFLOW_FAIL,
	TRACE_SIGNAL_LOSE_INFO,
};
#endif

/**
 * signal_generate - called when a signal is generated
 * @sig: signal number
 * @info: pointer to struct siginfo
 * @task: pointer to struct task_struct
 * @group: shared or private
 * @result: TRACE_SIGNAL_*
 *
 * The current process sends a 'sig' signal to the 'task' process with
 * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
 * 'info' is not a pointer and its fields cannot be accessed. Instead,
 * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
 * means that si_code is SI_KERNEL.
 */
TRACE_EVENT(signal_generate,

	TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task,
			int group, int result),

	TP_ARGS(sig, info, task, group, result),

	TP_STRUCT__entry(
		__field(	int,	sig			)
		__field(	int,	errno			)
		__field(	int,	code			)
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	group			)
		__field(	int,	result			)
	),

	TP_fast_assign(
		__entry->sig = sig;
		TP_STORE_SIGINFO(__entry, info);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
		__entry->pid = task->pid;
		__entry->group = group;
		__entry->result = result;
	),

	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
		  __entry->sig, __entry->errno, __entry->code,
		  __entry->comm, __entry->pid, __entry->group,
		  __entry->result)
);

/**
 * signal_deliver - called when a signal is delivered
 * @sig: signal number
 * @info: pointer to struct siginfo
 * @ka: pointer to struct k_sigaction
 *
 * A 'sig' signal is delivered to the current process with 'info' siginfo,
 * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
 * SIG_DFL.
 * Note that some signals reported by the signal_generate tracepoint can be
 * lost, ignored or modified (by a debugger) before hitting this tracepoint.
 * This means the tracepoint can show which signals are actually delivered,
 * but matching generated signals to delivered signals may not always be
 * possible.
 */
TRACE_EVENT(signal_deliver,

	TP_PROTO(int sig, struct kernel_siginfo *info, struct k_sigaction *ka),

	TP_ARGS(sig, info, ka),

	TP_STRUCT__entry(
		__field(	int,		sig		)
		__field(	int,		errno		)
		__field(	int,		code		)
		__field(	unsigned long,	sa_handler	)
		__field(	unsigned long,	sa_flags	)
	),

	TP_fast_assign(
		__entry->sig = sig;
		TP_STORE_SIGINFO(__entry, info);
		__entry->sa_handler = (unsigned long)ka->sa.sa_handler;
		__entry->sa_flags = ka->sa.sa_flags;
	),

	TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
		  __entry->sig, __entry->errno, __entry->code,
		  __entry->sa_handler, __entry->sa_flags)
);

#endif /* _TRACE_SIGNAL_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
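/*
 * Illustrative sketch (not part of the header above): a tiny userspace
 * program that makes the two tracepoints above fire for the same signal.
 * Enabling the "signal" trace events beforehand (for example via tracefs)
 * is assumed and not shown here.
 */
#include <signal.h>
#include <stdio.h>

static void on_usr1(int sig)
{
	/* Delivery of SIGUSR1 corresponds to a signal_deliver event. */
}

int main(void)
{
	signal(SIGUSR1, on_usr1);
	raise(SIGUSR1);		/* generation -> signal_generate */
	puts("SIGUSR1 generated and delivered");
	return 0;
}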
10 10 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> */ #ifndef __MT76_UTIL_H #define __MT76_UTIL_H #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/bitfield.h> #include <net/mac80211.h> struct mt76_worker { struct task_struct *task; void (*fn)(struct mt76_worker *); unsigned long state; }; enum { MT76_WORKER_SCHEDULED, MT76_WORKER_RUNNING, }; #define MT76_INCR(_var, _size) \ (_var = (((_var) + 1) % (_size))) int mt76_wcid_alloc(u32 *mask, int size); static inline void mt76_wcid_mask_set(u32 *mask, int idx) { mask[idx / 32] |= BIT(idx % 32); } static inline void mt76_wcid_mask_clear(u32 *mask, int idx) { mask[idx / 32] &= ~BIT(idx % 32); } static inline void mt76_skb_set_moredata(struct sk_buff *skb, bool enable) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (enable) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); else hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); } int __mt76_worker_fn(void *ptr); static inline int mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w, void (*fn)(struct mt76_worker *), const char *name) { const char *dev_name = wiphy_name(hw->wiphy); int ret; if (fn) w->fn = fn; w->task = kthread_run(__mt76_worker_fn, w, "mt76-%s %s", name, dev_name); if (IS_ERR(w->task)) { ret = PTR_ERR(w->task); w->task = NULL; return ret; } return 0; } static inline void mt76_worker_schedule(struct mt76_worker *w) { if (!w->task) return; if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) && !test_bit(MT76_WORKER_RUNNING, &w->state)) wake_up_process(w->task); } static inline void mt76_worker_disable(struct mt76_worker *w) { if (!w->task) return; kthread_park(w->task); WRITE_ONCE(w->state, 0); } static inline void mt76_worker_enable(struct mt76_worker *w) { if (!w->task) return; kthread_unpark(w->task); mt76_worker_schedule(w); } static inline void mt76_worker_teardown(struct mt76_worker *w) { if (!w->task) return; kthread_stop(w->task); w->task = NULL; } #endif |
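/*
 * Illustrative sketch (not part of the header above): the intended lifecycle
 * of a struct mt76_worker.  The owning device structure, its ieee80211_hw
 * pointer and the work function are hypothetical; only the mt76_worker_*
 * helpers come from the header above.
 */
struct demo_dev {
	struct ieee80211_hw *hw;
	struct mt76_worker tx_worker;
};

static void demo_tx_work(struct mt76_worker *w)
{
	/* container_of() back to the owning device, then drain its queues. */
	struct demo_dev *dev = container_of(w, struct demo_dev, tx_worker);

	(void)dev;
}

static int demo_start(struct demo_dev *dev)
{
	/* Spawns the "mt76-tx <wiphy>" kthread; it sleeps until scheduled. */
	return mt76_worker_setup(dev->hw, &dev->tx_worker, demo_tx_work, "tx");
}

static void demo_kick(struct demo_dev *dev)
{
	mt76_worker_schedule(&dev->tx_worker);	/* e.g. from an IRQ path */
}

static void demo_stop(struct demo_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
}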
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2000-2001 Vojtech Pavlik
 * Copyright (c) 2006-2010 Jiri Kosina
 *
 * HID to Linux Input mapping
 */

/*
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/hid.h>
#include <linux/hid-debug.h>

#include "hid-ids.h"

#define unk	KEY_UNKNOWN

static const unsigned char hid_keyboard[256] = {
	  0,  0,  0,  0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
	 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44,  2,  3,
	  4,  5,  6,  7,  8,  9, 10, 11, 28,  1, 14, 15, 57, 12, 13, 26,
	 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
	 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
	105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
	 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190,
	191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113,
	115,114,unk,unk,unk,121,unk, 89, 93,124, 92, 94, 95,unk,unk,unk,
	122,123, 90, 91, 85,unk,unk,unk,unk,unk,unk,unk,111,unk,unk,unk,
	unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,
	unk,unk,unk,unk,unk,unk,179,180,unk,unk,unk,unk,unk,unk,unk,unk,
	unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,unk,
	unk,unk,unk,unk,unk,unk,unk,unk,111,unk,unk,unk,unk,unk,unk,unk,
	 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
	150,158,159,128,136,177,178,176,142,152,173,140,unk,unk,unk,unk
};

static const struct {
	__s32 x;
	__s32 y;
} hid_hat_to_axis[] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1},
		       { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}};

struct usage_priority {
	__u32 usage;			/* the HID usage associated */
	bool global;			/* we assume all usages to be slotted,
					 * unless global */
	unsigned int slot_overwrite;	/* for globals: allows to set the usage
					 * before or after the slots */
};

/*
 * hid-input will convert this list into priorities:
 * the first element will have
the highest priority * (the length of the following array) and the last * element the lowest (1). * * hid-input will then shift the priority by 8 bits to leave some space * in case drivers want to interleave other fields. * * To accommodate slotted devices, the slot priority is * defined in the next 8 bits (defined by 0xff - slot). * * If drivers want to add fields before those, hid-input will * leave out the first 8 bits of the priority value. * * This still leaves us 65535 individual priority values. */ static const struct usage_priority hidinput_usages_priorities[] = { { /* Eraser (eraser touching) must always come before tipswitch */ .usage = HID_DG_ERASER, }, { /* Invert must always come before In Range */ .usage = HID_DG_INVERT, }, { /* Is the tip of the tool touching? */ .usage = HID_DG_TIPSWITCH, }, { /* Tip Pressure might emulate tip switch */ .usage = HID_DG_TIPPRESSURE, }, { /* In Range needs to come after the other tool states */ .usage = HID_DG_INRANGE, }, }; #define map_abs(c) hid_map_usage(hidinput, usage, &bit, &max, EV_ABS, (c)) #define map_rel(c) hid_map_usage(hidinput, usage, &bit, &max, EV_REL, (c)) #define map_key(c) hid_map_usage(hidinput, usage, &bit, &max, EV_KEY, (c)) #define map_led(c) hid_map_usage(hidinput, usage, &bit, &max, EV_LED, (c)) #define map_msc(c) hid_map_usage(hidinput, usage, &bit, &max, EV_MSC, (c)) #define map_abs_clear(c) hid_map_usage_clear(hidinput, usage, &bit, \ &max, EV_ABS, (c)) #define map_key_clear(c) hid_map_usage_clear(hidinput, usage, &bit, \ &max, EV_KEY, (c)) static bool match_scancode(struct hid_usage *usage, unsigned int cur_idx, unsigned int scancode) { return (usage->hid & (HID_USAGE_PAGE | HID_USAGE)) == scancode; } static bool match_keycode(struct hid_usage *usage, unsigned int cur_idx, unsigned int keycode) { /* * We should exclude unmapped usages when doing lookup by keycode. */ return (usage->type == EV_KEY && usage->code == keycode); } static bool match_index(struct hid_usage *usage, unsigned int cur_idx, unsigned int idx) { return cur_idx == idx; } typedef bool (*hid_usage_cmp_t)(struct hid_usage *usage, unsigned int cur_idx, unsigned int val); static struct hid_usage *hidinput_find_key(struct hid_device *hid, hid_usage_cmp_t match, unsigned int value, unsigned int *usage_idx) { unsigned int i, j, k, cur_idx = 0; struct hid_report *report; struct hid_usage *usage; for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { list_for_each_entry(report, &hid->report_enum[k].report_list, list) { for (i = 0; i < report->maxfield; i++) { for (j = 0; j < report->field[i]->maxusage; j++) { usage = report->field[i]->usage + j; if (usage->type == EV_KEY || usage->type == 0) { if (match(usage, cur_idx, value)) { if (usage_idx) *usage_idx = cur_idx; return usage; } cur_idx++; } } } } } return NULL; } static struct hid_usage *hidinput_locate_usage(struct hid_device *hid, const struct input_keymap_entry *ke, unsigned int *index) { struct hid_usage *usage; unsigned int scancode; if (ke->flags & INPUT_KEYMAP_BY_INDEX) usage = hidinput_find_key(hid, match_index, ke->index, index); else if (input_scancode_to_scalar(ke, &scancode) == 0) usage = hidinput_find_key(hid, match_scancode, scancode, index); else usage = NULL; return usage; } static int hidinput_getkeycode(struct input_dev *dev, struct input_keymap_entry *ke) { struct hid_device *hid = input_get_drvdata(dev); struct hid_usage *usage; unsigned int scancode, index; usage = hidinput_locate_usage(hid, ke, &index); if (usage) { ke->keycode = usage->type == EV_KEY ? 
usage->code : KEY_RESERVED; ke->index = index; scancode = usage->hid & (HID_USAGE_PAGE | HID_USAGE); ke->len = sizeof(scancode); memcpy(ke->scancode, &scancode, sizeof(scancode)); return 0; } return -EINVAL; } static int hidinput_setkeycode(struct input_dev *dev, const struct input_keymap_entry *ke, unsigned int *old_keycode) { struct hid_device *hid = input_get_drvdata(dev); struct hid_usage *usage; usage = hidinput_locate_usage(hid, ke, NULL); if (usage) { *old_keycode = usage->type == EV_KEY ? usage->code : KEY_RESERVED; usage->type = EV_KEY; usage->code = ke->keycode; clear_bit(*old_keycode, dev->keybit); set_bit(usage->code, dev->keybit); dbg_hid("Assigned keycode %d to HID usage code %x\n", usage->code, usage->hid); /* * Set the keybit for the old keycode if the old keycode is used * by another key */ if (hidinput_find_key(hid, match_keycode, *old_keycode, NULL)) set_bit(*old_keycode, dev->keybit); return 0; } return -EINVAL; } /** * hidinput_calc_abs_res - calculate an absolute axis resolution * @field: the HID report field to calculate resolution for * @code: axis code * * The formula is: * (logical_maximum - logical_minimum) * resolution = ---------------------------------------------------------- * (physical_maximum - physical_minimum) * 10 ^ unit_exponent * * as seen in the HID specification v1.11 6.2.2.7 Global Items. * * Only exponent 1 length units are processed. Centimeters and inches are * converted to millimeters. Degrees are converted to radians. */ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code) { __s32 unit_exponent = field->unit_exponent; __s32 logical_extents = field->logical_maximum - field->logical_minimum; __s32 physical_extents = field->physical_maximum - field->physical_minimum; __s32 prev; /* Check if the extents are sane */ if (logical_extents <= 0 || physical_extents <= 0) return 0; /* * Verify and convert units. 
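 * Only a handful of unit codes are handled here: 0x11 (centimeter),
 * 0x13 (inch), 0x14 (degree) and 0x12 (radian); any other unit makes
 * the function return a resolution of 0.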
* See HID specification v1.11 6.2.2.7 Global Items for unit decoding */ switch (code) { case ABS_X: case ABS_Y: case ABS_Z: case ABS_MT_POSITION_X: case ABS_MT_POSITION_Y: case ABS_MT_TOOL_X: case ABS_MT_TOOL_Y: case ABS_MT_TOUCH_MAJOR: case ABS_MT_TOUCH_MINOR: if (field->unit == 0x11) { /* If centimeters */ /* Convert to millimeters */ unit_exponent += 1; } else if (field->unit == 0x13) { /* If inches */ /* Convert to millimeters */ prev = physical_extents; physical_extents *= 254; if (physical_extents < prev) return 0; unit_exponent -= 1; } else { return 0; } break; case ABS_RX: case ABS_RY: case ABS_RZ: case ABS_WHEEL: case ABS_TILT_X: case ABS_TILT_Y: if (field->unit == 0x14) { /* If degrees */ /* Convert to radians */ prev = logical_extents; logical_extents *= 573; if (logical_extents < prev) return 0; unit_exponent += 1; } else if (field->unit != 0x12) { /* If not radians */ return 0; } break; default: return 0; } /* Apply negative unit exponent */ for (; unit_exponent < 0; unit_exponent++) { prev = logical_extents; logical_extents *= 10; if (logical_extents < prev) return 0; } /* Apply positive unit exponent */ for (; unit_exponent > 0; unit_exponent--) { prev = physical_extents; physical_extents *= 10; if (physical_extents < prev) return 0; } /* Calculate resolution */ return DIV_ROUND_CLOSEST(logical_extents, physical_extents); } EXPORT_SYMBOL_GPL(hidinput_calc_abs_res); #ifdef CONFIG_HID_BATTERY_STRENGTH static enum power_supply_property hidinput_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_SCOPE, }; #define HID_BATTERY_QUIRK_PERCENT (1 << 0) /* always reports percent */ #define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */ #define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */ #define HID_BATTERY_QUIRK_AVOID_QUERY (1 << 3) /* do not query the battery */ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD), HID_BATTERY_QUIRK_IGNORE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_3), HID_BATTERY_QUIRK_IGNORE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), HID_BATTERY_QUIRK_IGNORE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, 
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L), HID_BATTERY_QUIRK_AVOID_QUERY }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW), HID_BATTERY_QUIRK_AVOID_QUERY }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW), HID_BATTERY_QUIRK_AVOID_QUERY }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM), HID_BATTERY_QUIRK_AVOID_QUERY }, /* * Elan I2C-HID touchscreens seem to all report a non present battery, * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices. */ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, {} }; static unsigned find_battery_quirk(struct hid_device *hdev) { unsigned quirks = 0; const struct hid_device_id *match; match = hid_match_id(hdev, hid_battery_quirks); if (match != NULL) quirks = match->driver_data; return quirks; } static int hidinput_scale_battery_capacity(struct hid_device *dev, int value) { if (dev->battery_min < dev->battery_max && value >= dev->battery_min && value <= dev->battery_max) value = ((value - dev->battery_min) * 100) / (dev->battery_max - dev->battery_min); return value; } static int hidinput_query_battery_capacity(struct hid_device *dev) { u8 *buf; int ret; buf = kmalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4, dev->battery_report_type, HID_REQ_GET_REPORT); if (ret < 2) { kfree(buf); return -ENODATA; } ret = hidinput_scale_battery_capacity(dev, buf[1]); kfree(buf); return ret; } static int hidinput_get_battery_property(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) { struct hid_device *dev = power_supply_get_drvdata(psy); int value; int ret = 0; switch (prop) { case POWER_SUPPLY_PROP_PRESENT: case POWER_SUPPLY_PROP_ONLINE: val->intval = 1; break; case POWER_SUPPLY_PROP_CAPACITY: if (dev->battery_status != HID_BATTERY_REPORTED && !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; } else { value = dev->battery_capacity; } val->intval = value; break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = dev->name; break; case POWER_SUPPLY_PROP_STATUS: if (dev->battery_status != HID_BATTERY_REPORTED && !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; dev->battery_capacity = value; dev->battery_status = HID_BATTERY_QUERIED; } if (dev->battery_status == HID_BATTERY_UNKNOWN) val->intval = POWER_SUPPLY_STATUS_UNKNOWN; else val->intval = dev->battery_charge_status; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; default: ret = -EINVAL; break; } return ret; } static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field, bool is_percentage) { struct power_supply_desc *psy_desc; struct power_supply_config psy_cfg = { .drv_data = dev, }; unsigned quirks; s32 min, max; int error; if (dev->battery) return 0; /* already initialized? */ quirks = find_battery_quirk(dev); hid_dbg(dev, "device %x:%x:%x %d quirks %d\n", dev->bus, dev->vendor, dev->product, dev->version, quirks); if (quirks & HID_BATTERY_QUIRK_IGNORE) return 0; psy_desc = kzalloc(sizeof(*psy_desc), GFP_KERNEL); if (!psy_desc) return -ENOMEM; psy_desc->name = kasprintf(GFP_KERNEL, "hid-%s-battery", strlen(dev->uniq) ? 
dev->uniq : dev_name(&dev->dev)); if (!psy_desc->name) { error = -ENOMEM; goto err_free_mem; } psy_desc->type = POWER_SUPPLY_TYPE_BATTERY; psy_desc->properties = hidinput_battery_props; psy_desc->num_properties = ARRAY_SIZE(hidinput_battery_props); psy_desc->use_for_apm = 0; psy_desc->get_property = hidinput_get_battery_property; min = field->logical_minimum; max = field->logical_maximum; if (is_percentage || (quirks & HID_BATTERY_QUIRK_PERCENT)) { min = 0; max = 100; } if (quirks & HID_BATTERY_QUIRK_FEATURE) report_type = HID_FEATURE_REPORT; dev->battery_min = min; dev->battery_max = max; dev->battery_report_type = report_type; dev->battery_report_id = field->report->id; dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING; /* * Stylus is normally not connected to the device and thus we * can't query the device and get meaningful battery strength. * We have to wait for the device to report it on its own. */ dev->battery_avoid_query = report_type == HID_INPUT_REPORT && field->physical == HID_DG_STYLUS; if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY) dev->battery_avoid_query = true; dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { error = PTR_ERR(dev->battery); hid_warn(dev, "can't register power supply: %d\n", error); goto err_free_name; } power_supply_powers(dev->battery, &dev->dev); return 0; err_free_name: kfree(psy_desc->name); err_free_mem: kfree(psy_desc); dev->battery = NULL; return error; } static void hidinput_cleanup_battery(struct hid_device *dev) { const struct power_supply_desc *psy_desc; if (!dev->battery) return; psy_desc = dev->battery->desc; power_supply_unregister(dev->battery); kfree(psy_desc->name); kfree(psy_desc); dev->battery = NULL; } static void hidinput_update_battery(struct hid_device *dev, int value) { int capacity; if (!dev->battery) return; if (value == 0 || value < dev->battery_min || value > dev->battery_max) return; capacity = hidinput_scale_battery_capacity(dev, value); if (dev->battery_status != HID_BATTERY_REPORTED || capacity != dev->battery_capacity || ktime_after(ktime_get_coarse(), dev->battery_ratelimit_time)) { dev->battery_capacity = capacity; dev->battery_status = HID_BATTERY_REPORTED; dev->battery_ratelimit_time = ktime_add_ms(ktime_get_coarse(), 30 * 1000); power_supply_changed(dev->battery); } } static bool hidinput_set_battery_charge_status(struct hid_device *dev, unsigned int usage, int value) { switch (usage) { case HID_BAT_CHARGING: dev->battery_charge_status = value ? 
POWER_SUPPLY_STATUS_CHARGING : POWER_SUPPLY_STATUS_DISCHARGING; return true; } return false; } #else /* !CONFIG_HID_BATTERY_STRENGTH */ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field, bool is_percentage) { return 0; } static void hidinput_cleanup_battery(struct hid_device *dev) { } static void hidinput_update_battery(struct hid_device *dev, int value) { } static bool hidinput_set_battery_charge_status(struct hid_device *dev, unsigned int usage, int value) { return false; } #endif /* CONFIG_HID_BATTERY_STRENGTH */ static bool hidinput_field_in_collection(struct hid_device *device, struct hid_field *field, unsigned int type, unsigned int usage) { struct hid_collection *collection; collection = &device->collection[field->usage->collection_index]; return collection->type == type && collection->usage == usage; } static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned int usage_index) { struct input_dev *input = hidinput->input; struct hid_device *device = input_get_drvdata(input); const struct usage_priority *usage_priority = NULL; int max = 0, code; unsigned int i = 0; unsigned long *bit = NULL; field->hidinput = hidinput; if (field->flags & HID_MAIN_ITEM_CONSTANT) goto ignore; /* Ignore if report count is out of bounds. */ if (field->report_count < 1) goto ignore; /* only LED usages are supported in output fields */ if (field->report_type == HID_OUTPUT_REPORT && (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) { goto ignore; } /* assign a priority based on the static list declared here */ for (i = 0; i < ARRAY_SIZE(hidinput_usages_priorities); i++) { if (usage->hid == hidinput_usages_priorities[i].usage) { usage_priority = &hidinput_usages_priorities[i]; field->usages_priorities[usage_index] = (ARRAY_SIZE(hidinput_usages_priorities) - i) << 8; break; } } /* * For slotted devices, we need to also add the slot index * in the priority. 
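 * The slot contribution below is (0xff - slot_idx), so a usage in a
 * lower-numbered slot receives a larger priority value than the same
 * usage in a later slot; global usages keep their fixed slot_overwrite
 * instead.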
*/ if (usage_priority && usage_priority->global) field->usages_priorities[usage_index] |= usage_priority->slot_overwrite; else field->usages_priorities[usage_index] |= (0xff - field->slot_idx) << 16; if (device->driver->input_mapping) { int ret = device->driver->input_mapping(device, hidinput, field, usage, &bit, &max); if (ret > 0) goto mapped; if (ret < 0) goto ignore; } switch (usage->hid & HID_USAGE_PAGE) { case HID_UP_UNDEFINED: goto ignore; case HID_UP_KEYBOARD: set_bit(EV_REP, input->evbit); if ((usage->hid & HID_USAGE) < 256) { if (!hid_keyboard[usage->hid & HID_USAGE]) goto ignore; map_key_clear(hid_keyboard[usage->hid & HID_USAGE]); } else map_key(KEY_UNKNOWN); break; case HID_UP_BUTTON: code = ((usage->hid - 1) & HID_USAGE); switch (field->application) { case HID_GD_MOUSE: case HID_GD_POINTER: code += BTN_MOUSE; break; case HID_GD_JOYSTICK: if (code <= 0xf) code += BTN_JOYSTICK; else code += BTN_TRIGGER_HAPPY - 0x10; break; case HID_GD_GAMEPAD: if (code <= 0xf) code += BTN_GAMEPAD; else code += BTN_TRIGGER_HAPPY - 0x10; break; case HID_CP_CONSUMER_CONTROL: if (hidinput_field_in_collection(device, field, HID_COLLECTION_NAMED_ARRAY, HID_CP_PROGRAMMABLEBUTTONS)) { if (code <= 0x1d) code += KEY_MACRO1; else code += BTN_TRIGGER_HAPPY - 0x1e; break; } fallthrough; default: switch (field->physical) { case HID_GD_MOUSE: case HID_GD_POINTER: code += BTN_MOUSE; break; case HID_GD_JOYSTICK: code += BTN_JOYSTICK; break; case HID_GD_GAMEPAD: code += BTN_GAMEPAD; break; default: code += BTN_MISC; } } map_key(code); break; case HID_UP_SIMULATION: switch (usage->hid & 0xffff) { case 0xba: map_abs(ABS_RUDDER); break; case 0xbb: map_abs(ABS_THROTTLE); break; case 0xc4: map_abs(ABS_GAS); break; case 0xc5: map_abs(ABS_BRAKE); break; case 0xc8: map_abs(ABS_WHEEL); break; default: goto ignore; } break; case HID_UP_GENDESK: if ((usage->hid & 0xf0) == 0x80) { /* SystemControl */ switch (usage->hid & 0xf) { case 0x1: map_key_clear(KEY_POWER); break; case 0x2: map_key_clear(KEY_SLEEP); break; case 0x3: map_key_clear(KEY_WAKEUP); break; case 0x4: map_key_clear(KEY_CONTEXT_MENU); break; case 0x5: map_key_clear(KEY_MENU); break; case 0x6: map_key_clear(KEY_PROG1); break; case 0x7: map_key_clear(KEY_HELP); break; case 0x8: map_key_clear(KEY_EXIT); break; case 0x9: map_key_clear(KEY_SELECT); break; case 0xa: map_key_clear(KEY_RIGHT); break; case 0xb: map_key_clear(KEY_LEFT); break; case 0xc: map_key_clear(KEY_UP); break; case 0xd: map_key_clear(KEY_DOWN); break; case 0xe: map_key_clear(KEY_POWER2); break; case 0xf: map_key_clear(KEY_RESTART); break; default: goto unknown; } break; } if ((usage->hid & 0xf0) == 0x90) { /* SystemControl & D-pad */ switch (usage->hid) { case HID_GD_UP: usage->hat_dir = 1; break; case HID_GD_DOWN: usage->hat_dir = 5; break; case HID_GD_RIGHT: usage->hat_dir = 3; break; case HID_GD_LEFT: usage->hat_dir = 7; break; case HID_GD_DO_NOT_DISTURB: map_key_clear(KEY_DO_NOT_DISTURB); break; default: goto unknown; } if (usage->hid <= HID_GD_LEFT) { if (field->dpad) { map_abs(field->dpad); goto ignore; } map_abs(ABS_HAT0X); } break; } if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */ switch (usage->hid & 0xf) { case 0x9: map_key_clear(KEY_MICMUTE); break; case 0xa: map_key_clear(KEY_ACCESSIBILITY); break; default: goto ignore; } break; } if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ switch (usage->hid & 0xf) { case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; default: goto ignore; } break; } /* * Some lazy vendors declare 255 usages for System Control, * leading to the 
creation of ABS_X|Y axis and too many others. * It wouldn't be a problem if joydev doesn't consider the * device as a joystick then. */ if (field->application == HID_GD_SYSTEM_CONTROL) goto ignore; switch (usage->hid) { /* These usage IDs map directly to the usage codes. */ case HID_GD_X: case HID_GD_Y: case HID_GD_Z: case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: if (field->flags & HID_MAIN_ITEM_RELATIVE) map_rel(usage->hid & 0xf); else map_abs_clear(usage->hid & 0xf); break; case HID_GD_WHEEL: if (field->flags & HID_MAIN_ITEM_RELATIVE) { set_bit(REL_WHEEL, input->relbit); map_rel(REL_WHEEL_HI_RES); } else { map_abs(usage->hid & 0xf); } break; case HID_GD_SLIDER: case HID_GD_DIAL: if (field->flags & HID_MAIN_ITEM_RELATIVE) map_rel(usage->hid & 0xf); else map_abs(usage->hid & 0xf); break; case HID_GD_HATSWITCH: usage->hat_min = field->logical_minimum; usage->hat_max = field->logical_maximum; map_abs(ABS_HAT0X); break; case HID_GD_START: map_key_clear(BTN_START); break; case HID_GD_SELECT: map_key_clear(BTN_SELECT); break; case HID_GD_RFKILL_BTN: /* MS wireless radio ctl extension, also check CA */ if (field->application == HID_GD_WIRELESS_RADIO_CTLS) { map_key_clear(KEY_RFKILL); /* We need to simulate the btn release */ field->flags |= HID_MAIN_ITEM_RELATIVE; break; } goto unknown; default: goto unknown; } break; case HID_UP_LED: switch (usage->hid & 0xffff) { /* HID-Value: */ case 0x01: map_led (LED_NUML); break; /* "Num Lock" */ case 0x02: map_led (LED_CAPSL); break; /* "Caps Lock" */ case 0x03: map_led (LED_SCROLLL); break; /* "Scroll Lock" */ case 0x04: map_led (LED_COMPOSE); break; /* "Compose" */ case 0x05: map_led (LED_KANA); break; /* "Kana" */ case 0x27: map_led (LED_SLEEP); break; /* "Stand-By" */ case 0x4c: map_led (LED_SUSPEND); break; /* "System Suspend" */ case 0x09: map_led (LED_MUTE); break; /* "Mute" */ case 0x4b: map_led (LED_MISC); break; /* "Generic Indicator" */ case 0x19: map_led (LED_MAIL); break; /* "Message Waiting" */ case 0x4d: map_led (LED_CHARGING); break; /* "External Power Connected" */ default: goto ignore; } break; case HID_UP_DIGITIZER: if ((field->application & 0xff) == 0x01) /* Digitizer */ __set_bit(INPUT_PROP_POINTER, input->propbit); else if ((field->application & 0xff) == 0x02) /* Pen */ __set_bit(INPUT_PROP_DIRECT, input->propbit); switch (usage->hid & 0xff) { case 0x00: /* Undefined */ goto ignore; case 0x30: /* TipPressure */ if (!test_bit(BTN_TOUCH, input->keybit)) { device->quirks |= HID_QUIRK_NOTOUCH; set_bit(EV_KEY, input->evbit); set_bit(BTN_TOUCH, input->keybit); } map_abs_clear(ABS_PRESSURE); break; case 0x32: /* InRange */ switch (field->physical) { case HID_DG_PUCK: map_key(BTN_TOOL_MOUSE); break; case HID_DG_FINGER: map_key(BTN_TOOL_FINGER); break; default: /* * If the physical is not given, * rely on the application. 
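 * (touchscreen and touchpad applications are assumed to be
 * finger-operated and map to BTN_TOOL_FINGER, any other application
 * maps to BTN_TOOL_PEN)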
*/ if (!field->physical) { switch (field->application) { case HID_DG_TOUCHSCREEN: case HID_DG_TOUCHPAD: map_key_clear(BTN_TOOL_FINGER); break; default: map_key_clear(BTN_TOOL_PEN); } } else { map_key(BTN_TOOL_PEN); } break; } break; case 0x3b: /* Battery Strength */ hidinput_setup_battery(device, HID_INPUT_REPORT, field, false); usage->type = EV_PWR; return; case 0x3c: /* Invert */ device->quirks &= ~HID_QUIRK_NOINVERT; map_key_clear(BTN_TOOL_RUBBER); break; case 0x3d: /* X Tilt */ map_abs_clear(ABS_TILT_X); break; case 0x3e: /* Y Tilt */ map_abs_clear(ABS_TILT_Y); break; case 0x33: /* Touch */ case 0x42: /* TipSwitch */ case 0x43: /* TipSwitch2 */ device->quirks &= ~HID_QUIRK_NOTOUCH; map_key_clear(BTN_TOUCH); break; case 0x44: /* BarrelSwitch */ map_key_clear(BTN_STYLUS); break; case 0x45: /* ERASER */ /* * This event is reported when eraser tip touches the surface. * Actual eraser (BTN_TOOL_RUBBER) is set and released either * by Invert if tool reports proximity or by Eraser directly. */ if (!test_bit(BTN_TOOL_RUBBER, input->keybit)) { device->quirks |= HID_QUIRK_NOINVERT; set_bit(BTN_TOOL_RUBBER, input->keybit); } map_key_clear(BTN_TOUCH); break; case 0x46: /* TabletPick */ case 0x5a: /* SecondaryBarrelSwitch */ map_key_clear(BTN_STYLUS2); break; case 0x5b: /* TransducerSerialNumber */ case 0x6e: /* TransducerSerialNumber2 */ map_msc(MSC_SERIAL); break; default: goto unknown; } break; case HID_UP_TELEPHONY: switch (usage->hid & HID_USAGE) { case 0x2f: map_key_clear(KEY_MICMUTE); break; case 0xb0: map_key_clear(KEY_NUMERIC_0); break; case 0xb1: map_key_clear(KEY_NUMERIC_1); break; case 0xb2: map_key_clear(KEY_NUMERIC_2); break; case 0xb3: map_key_clear(KEY_NUMERIC_3); break; case 0xb4: map_key_clear(KEY_NUMERIC_4); break; case 0xb5: map_key_clear(KEY_NUMERIC_5); break; case 0xb6: map_key_clear(KEY_NUMERIC_6); break; case 0xb7: map_key_clear(KEY_NUMERIC_7); break; case 0xb8: map_key_clear(KEY_NUMERIC_8); break; case 0xb9: map_key_clear(KEY_NUMERIC_9); break; case 0xba: map_key_clear(KEY_NUMERIC_STAR); break; case 0xbb: map_key_clear(KEY_NUMERIC_POUND); break; case 0xbc: map_key_clear(KEY_NUMERIC_A); break; case 0xbd: map_key_clear(KEY_NUMERIC_B); break; case 0xbe: map_key_clear(KEY_NUMERIC_C); break; case 0xbf: map_key_clear(KEY_NUMERIC_D); break; default: goto ignore; } break; case HID_UP_CONSUMER: /* USB HUT v1.12, pages 75-84 */ switch (usage->hid & HID_USAGE) { case 0x000: goto ignore; case 0x030: map_key_clear(KEY_POWER); break; case 0x031: map_key_clear(KEY_RESTART); break; case 0x032: map_key_clear(KEY_SLEEP); break; case 0x034: map_key_clear(KEY_SLEEP); break; case 0x035: map_key_clear(KEY_KBDILLUMTOGGLE); break; case 0x036: map_key_clear(BTN_MISC); break; case 0x040: map_key_clear(KEY_MENU); break; /* Menu */ case 0x041: map_key_clear(KEY_SELECT); break; /* Menu Pick */ case 0x042: map_key_clear(KEY_UP); break; /* Menu Up */ case 0x043: map_key_clear(KEY_DOWN); break; /* Menu Down */ case 0x044: map_key_clear(KEY_LEFT); break; /* Menu Left */ case 0x045: map_key_clear(KEY_RIGHT); break; /* Menu Right */ case 0x046: map_key_clear(KEY_ESC); break; /* Menu Escape */ case 0x047: map_key_clear(KEY_KPPLUS); break; /* Menu Value Increase */ case 0x048: map_key_clear(KEY_KPMINUS); break; /* Menu Value Decrease */ case 0x060: map_key_clear(KEY_INFO); break; /* Data On Screen */ case 0x061: map_key_clear(KEY_SUBTITLE); break; /* Closed Caption */ case 0x063: map_key_clear(KEY_VCR); break; /* VCR/TV */ case 0x065: map_key_clear(KEY_CAMERA); break; /* Snapshot */ case 0x069: 
map_key_clear(KEY_RED); break; case 0x06a: map_key_clear(KEY_GREEN); break; case 0x06b: map_key_clear(KEY_BLUE); break; case 0x06c: map_key_clear(KEY_YELLOW); break; case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break; case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break; case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x072: map_key_clear(KEY_BRIGHTNESS_TOGGLE); break; case 0x073: map_key_clear(KEY_BRIGHTNESS_MIN); break; case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break; case 0x076: map_key_clear(KEY_CAMERA_ACCESS_ENABLE); break; case 0x077: map_key_clear(KEY_CAMERA_ACCESS_DISABLE); break; case 0x078: map_key_clear(KEY_CAMERA_ACCESS_TOGGLE); break; case 0x079: map_key_clear(KEY_KBDILLUMUP); break; case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break; case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break; case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; case 0x083: map_key_clear(KEY_LAST); break; case 0x084: map_key_clear(KEY_ENTER); break; case 0x088: map_key_clear(KEY_PC); break; case 0x089: map_key_clear(KEY_TV); break; case 0x08a: map_key_clear(KEY_WWW); break; case 0x08b: map_key_clear(KEY_DVD); break; case 0x08c: map_key_clear(KEY_PHONE); break; case 0x08d: map_key_clear(KEY_PROGRAM); break; case 0x08e: map_key_clear(KEY_VIDEOPHONE); break; case 0x08f: map_key_clear(KEY_GAMES); break; case 0x090: map_key_clear(KEY_MEMO); break; case 0x091: map_key_clear(KEY_CD); break; case 0x092: map_key_clear(KEY_VCR); break; case 0x093: map_key_clear(KEY_TUNER); break; case 0x094: map_key_clear(KEY_EXIT); break; case 0x095: map_key_clear(KEY_HELP); break; case 0x096: map_key_clear(KEY_TAPE); break; case 0x097: map_key_clear(KEY_TV2); break; case 0x098: map_key_clear(KEY_SAT); break; case 0x09a: map_key_clear(KEY_PVR); break; case 0x09c: map_key_clear(KEY_CHANNELUP); break; case 0x09d: map_key_clear(KEY_CHANNELDOWN); break; case 0x0a0: map_key_clear(KEY_VCR2); break; case 0x0b0: map_key_clear(KEY_PLAY); break; case 0x0b1: map_key_clear(KEY_PAUSE); break; case 0x0b2: map_key_clear(KEY_RECORD); break; case 0x0b3: map_key_clear(KEY_FASTFORWARD); break; case 0x0b4: map_key_clear(KEY_REWIND); break; case 0x0b5: map_key_clear(KEY_NEXTSONG); break; case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break; case 0x0b7: map_key_clear(KEY_STOPCD); break; case 0x0b8: map_key_clear(KEY_EJECTCD); break; case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT); break; case 0x0b9: map_key_clear(KEY_SHUFFLE); break; case 0x0bf: map_key_clear(KEY_SLOW); break; case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; case 0x0d8: map_key_clear(KEY_DICTATE); break; case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; case 0x0e0: map_abs_clear(ABS_VOLUME); break; case 0x0e2: map_key_clear(KEY_MUTE); break; case 0x0e5: map_key_clear(KEY_BASSBOOST); break; case 0x0e9: map_key_clear(KEY_VOLUMEUP); break; case 0x0ea: map_key_clear(KEY_VOLUMEDOWN); break; case 0x0f5: map_key_clear(KEY_SLOW); break; case 0x181: map_key_clear(KEY_BUTTONCONFIG); break; case 0x182: map_key_clear(KEY_BOOKMARKS); break; case 0x183: map_key_clear(KEY_CONFIG); break; case 0x184: map_key_clear(KEY_WORDPROCESSOR); break; case 0x185: map_key_clear(KEY_EDITOR); break; case 0x186: map_key_clear(KEY_SPREADSHEET); break; case 0x187: map_key_clear(KEY_GRAPHICSEDITOR); break; case 0x188: map_key_clear(KEY_PRESENTATION); break; case 0x189: map_key_clear(KEY_DATABASE); break; case 0x18a: map_key_clear(KEY_MAIL); break; case 0x18b: map_key_clear(KEY_NEWS); break; case 
0x18c: map_key_clear(KEY_VOICEMAIL); break; case 0x18d: map_key_clear(KEY_ADDRESSBOOK); break; case 0x18e: map_key_clear(KEY_CALENDAR); break; case 0x18f: map_key_clear(KEY_TASKMANAGER); break; case 0x190: map_key_clear(KEY_JOURNAL); break; case 0x191: map_key_clear(KEY_FINANCE); break; case 0x192: map_key_clear(KEY_CALC); break; case 0x193: map_key_clear(KEY_PLAYER); break; case 0x194: map_key_clear(KEY_FILE); break; case 0x196: map_key_clear(KEY_WWW); break; case 0x199: map_key_clear(KEY_CHAT); break; case 0x19c: map_key_clear(KEY_LOGOFF); break; case 0x19e: map_key_clear(KEY_COFFEE); break; case 0x19f: map_key_clear(KEY_CONTROLPANEL); break; case 0x1a2: map_key_clear(KEY_APPSELECT); break; case 0x1a3: map_key_clear(KEY_NEXT); break; case 0x1a4: map_key_clear(KEY_PREVIOUS); break; case 0x1a6: map_key_clear(KEY_HELP); break; case 0x1a7: map_key_clear(KEY_DOCUMENTS); break; case 0x1ab: map_key_clear(KEY_SPELLCHECK); break; case 0x1ae: map_key_clear(KEY_KEYBOARD); break; case 0x1b1: map_key_clear(KEY_SCREENSAVER); break; case 0x1b4: map_key_clear(KEY_FILE); break; case 0x1b6: map_key_clear(KEY_IMAGES); break; case 0x1b7: map_key_clear(KEY_AUDIO); break; case 0x1b8: map_key_clear(KEY_VIDEO); break; case 0x1bc: map_key_clear(KEY_MESSENGER); break; case 0x1bd: map_key_clear(KEY_INFO); break; case 0x1cb: map_key_clear(KEY_ASSISTANT); break; case 0x201: map_key_clear(KEY_NEW); break; case 0x202: map_key_clear(KEY_OPEN); break; case 0x203: map_key_clear(KEY_CLOSE); break; case 0x204: map_key_clear(KEY_EXIT); break; case 0x207: map_key_clear(KEY_SAVE); break; case 0x208: map_key_clear(KEY_PRINT); break; case 0x209: map_key_clear(KEY_PROPS); break; case 0x21a: map_key_clear(KEY_UNDO); break; case 0x21b: map_key_clear(KEY_COPY); break; case 0x21c: map_key_clear(KEY_CUT); break; case 0x21d: map_key_clear(KEY_PASTE); break; case 0x21f: map_key_clear(KEY_FIND); break; case 0x221: map_key_clear(KEY_SEARCH); break; case 0x222: map_key_clear(KEY_GOTO); break; case 0x223: map_key_clear(KEY_HOMEPAGE); break; case 0x224: map_key_clear(KEY_BACK); break; case 0x225: map_key_clear(KEY_FORWARD); break; case 0x226: map_key_clear(KEY_STOP); break; case 0x227: map_key_clear(KEY_REFRESH); break; case 0x22a: map_key_clear(KEY_BOOKMARKS); break; case 0x22d: map_key_clear(KEY_ZOOMIN); break; case 0x22e: map_key_clear(KEY_ZOOMOUT); break; case 0x22f: map_key_clear(KEY_ZOOMRESET); break; case 0x232: map_key_clear(KEY_FULL_SCREEN); break; case 0x233: map_key_clear(KEY_SCROLLUP); break; case 0x234: map_key_clear(KEY_SCROLLDOWN); break; case 0x238: /* AC Pan */ set_bit(REL_HWHEEL, input->relbit); map_rel(REL_HWHEEL_HI_RES); break; case 0x23d: map_key_clear(KEY_EDIT); break; case 0x25f: map_key_clear(KEY_CANCEL); break; case 0x269: map_key_clear(KEY_INSERT); break; case 0x26a: map_key_clear(KEY_DELETE); break; case 0x279: map_key_clear(KEY_REDO); break; case 0x289: map_key_clear(KEY_REPLY); break; case 0x28b: map_key_clear(KEY_FORWARDMAIL); break; case 0x28c: map_key_clear(KEY_SEND); break; case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break; case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP); break; case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; case 0x29f: map_key_clear(KEY_SCALE); break; default: 
map_key_clear(KEY_UNKNOWN); } break; case HID_UP_GENDEVCTRLS: switch (usage->hid) { case HID_DC_BATTERYSTRENGTH: hidinput_setup_battery(device, HID_INPUT_REPORT, field, false); usage->type = EV_PWR; return; } goto unknown; case HID_UP_BATTERY: switch (usage->hid) { case HID_BAT_ABSOLUTESTATEOFCHARGE: hidinput_setup_battery(device, HID_INPUT_REPORT, field, true); usage->type = EV_PWR; return; case HID_BAT_CHARGING: usage->type = EV_PWR; return; } goto unknown; case HID_UP_CAMERA: switch (usage->hid & HID_USAGE) { case 0x020: map_key_clear(KEY_CAMERA_FOCUS); break; case 0x021: map_key_clear(KEY_CAMERA); break; default: goto ignore; } break; case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */ set_bit(EV_REP, input->evbit); switch (usage->hid & HID_USAGE) { case 0x021: map_key_clear(KEY_PRINT); break; case 0x070: map_key_clear(KEY_HP); break; case 0x071: map_key_clear(KEY_CAMERA); break; case 0x072: map_key_clear(KEY_SOUND); break; case 0x073: map_key_clear(KEY_QUESTION); break; case 0x080: map_key_clear(KEY_EMAIL); break; case 0x081: map_key_clear(KEY_CHAT); break; case 0x082: map_key_clear(KEY_SEARCH); break; case 0x083: map_key_clear(KEY_CONNECT); break; case 0x084: map_key_clear(KEY_FINANCE); break; case 0x085: map_key_clear(KEY_SPORT); break; case 0x086: map_key_clear(KEY_SHOP); break; default: goto ignore; } break; case HID_UP_HPVENDOR2: set_bit(EV_REP, input->evbit); switch (usage->hid & HID_USAGE) { case 0x001: map_key_clear(KEY_MICMUTE); break; case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x004: map_key_clear(KEY_BRIGHTNESSUP); break; default: goto ignore; } break; case HID_UP_MSVENDOR: goto ignore; case HID_UP_CUSTOM: /* Reported on Logitech and Apple USB keyboards */ set_bit(EV_REP, input->evbit); goto ignore; case HID_UP_LOGIVENDOR: /* intentional fallback */ case HID_UP_LOGIVENDOR2: /* intentional fallback */ case HID_UP_LOGIVENDOR3: goto ignore; case HID_UP_PID: switch (usage->hid & HID_USAGE) { case 0xa4: map_key_clear(BTN_DEAD); break; default: goto ignore; } break; default: unknown: if (field->report_size == 1) { if (field->report->type == HID_OUTPUT_REPORT) { map_led(LED_MISC); break; } map_key(BTN_MISC); break; } if (field->flags & HID_MAIN_ITEM_RELATIVE) { map_rel(REL_MISC); break; } map_abs(ABS_MISC); break; } mapped: /* Mapping failed, bail out */ if (!bit) return; if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { /* * The driver indicated that no further generic handling * of the usage is desired. */ return; } set_bit(usage->type, input->evbit); /* * This part is *really* controversial: * - HID aims at being generic so we should do our best to export * all incoming events * - HID describes what events are, so there is no reason for ABS_X * to be mapped to ABS_Y * - HID is using *_MISC+N as a default value, but nothing prevents * *_MISC+N to overwrite a legitimate even, which confuses userspace * (for instance ABS_MISC + 7 is ABS_MT_SLOT, which has a different * processing) * * If devices still want to use this (at their own risk), they will * have to use the quirk HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE, but * the default should be a reliable mapping. 
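 * Concretely: with HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE a duplicate
 * usage is moved to the next free event code; without it the duplicate
 * is dropped and HID_STAT_DUP_DETECTED is recorded so that
 * hidinput_connect() can report it.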
*/ while (usage->code <= max && test_and_set_bit(usage->code, bit)) { if (device->quirks & HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE) { usage->code = find_next_zero_bit(bit, max + 1, usage->code); } else { device->status |= HID_STAT_DUP_DETECTED; goto ignore; } } if (usage->code > max) goto ignore; if (usage->type == EV_ABS) { int a = field->logical_minimum; int b = field->logical_maximum; if ((device->quirks & HID_QUIRK_BADPAD) && (usage->code == ABS_X || usage->code == ABS_Y)) { a = field->logical_minimum = 0; b = field->logical_maximum = 255; } if (field->application == HID_GD_GAMEPAD || field->application == HID_GD_JOYSTICK) input_set_abs_params(input, usage->code, a, b, (b - a) >> 8, (b - a) >> 4); else input_set_abs_params(input, usage->code, a, b, 0, 0); input_abs_set_res(input, usage->code, hidinput_calc_abs_res(field, usage->code)); /* use a larger default input buffer for MT devices */ if (usage->code == ABS_MT_POSITION_X && input->hint_events_per_packet == 0) input_set_events_per_packet(input, 60); } if (usage->type == EV_ABS && (usage->hat_min < usage->hat_max || usage->hat_dir)) { int i; for (i = usage->code; i < usage->code + 2 && i <= max; i++) { input_set_abs_params(input, i, -1, 1, 0, 0); set_bit(i, input->absbit); } if (usage->hat_dir && !field->dpad) field->dpad = usage->code; } /* for those devices which produce Consumer volume usage as relative, * we emulate pressing volumeup/volumedown appropriate number of times * in hidinput_hid_event() */ if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->code == ABS_VOLUME)) { set_bit(KEY_VOLUMEUP, input->keybit); set_bit(KEY_VOLUMEDOWN, input->keybit); } if (usage->type == EV_KEY) { set_bit(EV_MSC, input->evbit); set_bit(MSC_SCAN, input->mscbit); } return; ignore: usage->type = 0; usage->code = 0; } static void hidinput_handle_scroll(struct hid_usage *usage, struct input_dev *input, __s32 value) { int code; int hi_res, lo_res; if (value == 0) return; if (usage->code == REL_WHEEL_HI_RES) code = REL_WHEEL; else code = REL_HWHEEL; /* * Windows reports one wheel click as value 120. Where a high-res * scroll wheel is present, a fraction of 120 is reported instead. * Our REL_WHEEL_HI_RES axis does the same because all HW must * adhere to the 120 expectation. 
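 * For example, with a resolution multiplier of 8 each report of value 1
 * adds 120/8 = 15 to the hi-res axis; after eight such reports
 * wheel_accumulated reaches 120 and exactly one legacy REL_WHEEL click
 * is emitted.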
*/ hi_res = value * 120/usage->resolution_multiplier; usage->wheel_accumulated += hi_res; lo_res = usage->wheel_accumulated/120; if (lo_res) usage->wheel_accumulated -= lo_res * 120; input_event(input, EV_REL, code, lo_res); input_event(input, EV_REL, usage->code, hi_res); } static void hid_report_release_tool(struct hid_report *report, struct input_dev *input, unsigned int tool) { /* if the given tool is not currently reported, ignore */ if (!test_bit(tool, input->key)) return; /* * if the given tool was previously set, release it, * release any TOUCH and send an EV_SYN */ input_event(input, EV_KEY, BTN_TOUCH, 0); input_event(input, EV_KEY, tool, 0); input_event(input, EV_SYN, SYN_REPORT, 0); report->tool = 0; } static void hid_report_set_tool(struct hid_report *report, struct input_dev *input, unsigned int new_tool) { if (report->tool != new_tool) hid_report_release_tool(report, input, report->tool); input_event(input, EV_KEY, new_tool, 1); report->tool = new_tool; } void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct input_dev *input; struct hid_report *report = field->report; unsigned *quirks = &hid->quirks; if (!usage->type) return; if (usage->type == EV_PWR) { bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value); if (!handled) hidinput_update_battery(hid, value); return; } if (!field->hidinput) return; input = field->hidinput->input; if (usage->hat_min < usage->hat_max || usage->hat_dir) { int hat_dir = usage->hat_dir; if (!hat_dir) hat_dir = (value - usage->hat_min) * 8 / (usage->hat_max - usage->hat_min + 1) + 1; if (hat_dir < 0 || hat_dir > 8) hat_dir = 0; input_event(input, usage->type, usage->code , hid_hat_to_axis[hat_dir].x); input_event(input, usage->type, usage->code + 1, hid_hat_to_axis[hat_dir].y); return; } /* * Ignore out-of-range values as per HID specification, * section 5.10 and 6.2.25, when NULL state bit is present. * When it's not, clamp the value to match Microsoft's input * driver as mentioned in "Required HID usages for digitizers": * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp * * The logical_minimum < logical_maximum check is done so that we * don't unintentionally discard values sent by devices which * don't specify logical min and max. */ if ((field->flags & HID_MAIN_ITEM_VARIABLE) && field->logical_minimum < field->logical_maximum) { if (field->flags & HID_MAIN_ITEM_NULL_STATE && (value < field->logical_minimum || value > field->logical_maximum)) { dbg_hid("Ignoring out-of-range value %x\n", value); return; } value = clamp(value, field->logical_minimum, field->logical_maximum); } switch (usage->hid) { case HID_DG_ERASER: report->tool_active |= !!value; /* * if eraser is set, we must enforce BTN_TOOL_RUBBER * to accommodate for devices not following the spec. */ if (value) hid_report_set_tool(report, input, BTN_TOOL_RUBBER); else if (report->tool != BTN_TOOL_RUBBER) /* value is off, tool is not rubber, ignore */ return; else if (*quirks & HID_QUIRK_NOINVERT && !test_bit(BTN_TOUCH, input->key)) { /* * There is no invert to release the tool, let hid_input * send BTN_TOUCH with scancode and release the tool after. */ hid_report_release_tool(report, input, BTN_TOOL_RUBBER); return; } /* let hid-input set BTN_TOUCH */ break; case HID_DG_INVERT: report->tool_active |= !!value; /* * If invert is set, we store BTN_TOOL_RUBBER. 
*/ if (value) hid_report_set_tool(report, input, BTN_TOOL_RUBBER); else if (!report->tool_active) /* tool_active not set means Invert and Eraser are not set */ hid_report_release_tool(report, input, BTN_TOOL_RUBBER); /* no further processing */ return; case HID_DG_INRANGE: report->tool_active |= !!value; if (report->tool_active) { /* * if tool is not set but is marked as active, * assume ours */ if (!report->tool) report->tool = usage->code; /* drivers may have changed the value behind our back, resend it */ hid_report_set_tool(report, input, report->tool); } else { hid_report_release_tool(report, input, usage->code); } /* reset tool_active for the next event */ report->tool_active = false; /* no further processing */ return; case HID_DG_TIPSWITCH: report->tool_active |= !!value; /* if tool is set to RUBBER we should ignore the current value */ if (report->tool == BTN_TOOL_RUBBER) return; break; case HID_DG_TIPPRESSURE: if (*quirks & HID_QUIRK_NOTOUCH) { int a = field->logical_minimum; int b = field->logical_maximum; if (value > a + ((b - a) >> 3)) { input_event(input, EV_KEY, BTN_TOUCH, 1); report->tool_active = true; } } break; case HID_UP_PID | 0x83UL: /* Simultaneous Effects Max */ dbg_hid("Maximum Effects - %d\n",value); return; case HID_UP_PID | 0x7fUL: dbg_hid("PID Pool Report\n"); return; } switch (usage->type) { case EV_KEY: if (usage->code == 0) /* Key 0 is "unassigned", not KEY_UNKNOWN */ return; break; case EV_REL: if (usage->code == REL_WHEEL_HI_RES || usage->code == REL_HWHEEL_HI_RES) { hidinput_handle_scroll(usage, input, value); return; } break; case EV_ABS: if ((field->flags & HID_MAIN_ITEM_RELATIVE) && usage->code == ABS_VOLUME) { int count = abs(value); int direction = value > 0 ? KEY_VOLUMEUP : KEY_VOLUMEDOWN; int i; for (i = 0; i < count; i++) { input_event(input, EV_KEY, direction, 1); input_sync(input); input_event(input, EV_KEY, direction, 0); input_sync(input); } return; } else if (((*quirks & HID_QUIRK_X_INVERT) && usage->code == ABS_X) || ((*quirks & HID_QUIRK_Y_INVERT) && usage->code == ABS_Y)) value = field->logical_maximum - value; break; } /* * Ignore reports for absolute data if the data didn't change. This is * not only an optimization but also fixes 'dead' key reports. Some * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID * 0x31 and 0x32) report multiple keys, even though a localized keyboard * can only have one of them physically available. The 'dead' keys * report constant 0. As all map to the same keycode, they'd confuse * the input layer. If we filter the 'dead' keys on the HID level, we * skip the keycode translation and only forward real events. 
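 * (In the hid_keyboard[] table above, usages 0x31 and 0x32 indeed share
 * keycode 43, which is exactly the situation this filter handles.)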
*/ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE | HID_MAIN_ITEM_BUFFERED_BYTE)) && (field->flags & HID_MAIN_ITEM_VARIABLE) && usage->usage_index < field->maxusage && value == field->value[usage->usage_index]) return; /* report the usage code as scancode if the key status has changed */ if (usage->type == EV_KEY && (!test_bit(usage->code, input->key)) == value) input_event(input, EV_MSC, MSC_SCAN, usage->hid); input_event(input, usage->type, usage->code, value); if ((field->flags & HID_MAIN_ITEM_RELATIVE) && usage->type == EV_KEY && value) { input_sync(input); input_event(input, usage->type, usage->code, 0); } } void hidinput_report_event(struct hid_device *hid, struct hid_report *report) { struct hid_input *hidinput; if (hid->quirks & HID_QUIRK_NO_INPUT_SYNC) return; list_for_each_entry(hidinput, &hid->inputs, list) input_sync(hidinput->input); } EXPORT_SYMBOL_GPL(hidinput_report_event); static int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field) { struct hid_report *report; int i, j; list_for_each_entry(report, &hid->report_enum[HID_OUTPUT_REPORT].report_list, list) { for (i = 0; i < report->maxfield; i++) { *field = report->field[i]; for (j = 0; j < (*field)->maxusage; j++) if ((*field)->usage[j].type == type && (*field)->usage[j].code == code) return j; } } return -1; } struct hid_field *hidinput_get_led_field(struct hid_device *hid) { struct hid_report *report; struct hid_field *field; int i, j; list_for_each_entry(report, &hid->report_enum[HID_OUTPUT_REPORT].report_list, list) { for (i = 0; i < report->maxfield; i++) { field = report->field[i]; for (j = 0; j < field->maxusage; j++) if (field->usage[j].type == EV_LED) return field; } } return NULL; } EXPORT_SYMBOL_GPL(hidinput_get_led_field); unsigned int hidinput_count_leds(struct hid_device *hid) { struct hid_report *report; struct hid_field *field; int i, j; unsigned int count = 0; list_for_each_entry(report, &hid->report_enum[HID_OUTPUT_REPORT].report_list, list) { for (i = 0; i < report->maxfield; i++) { field = report->field[i]; for (j = 0; j < field->maxusage; j++) if (field->usage[j].type == EV_LED && field->value[j]) count += 1; } } return count; } EXPORT_SYMBOL_GPL(hidinput_count_leds); static void hidinput_led_worker(struct work_struct *work) { struct hid_device *hid = container_of(work, struct hid_device, led_work); struct hid_field *field; struct hid_report *report; int ret; u32 len; __u8 *buf; field = hidinput_get_led_field(hid); if (!field) return; /* * field->report is accessed unlocked regarding HID core. So there might * be another incoming SET-LED request from user-space, which changes * the LED state while we assemble our outgoing buffer. However, this * doesn't matter as hid_output_report() correctly converts it into a * boolean value no matter what information is currently set on the LED * field (even garbage). So the remote device will always get a valid * request. * And in case we send a wrong value, a next led worker is spawned * for every SET-LED request so the following worker will send the * correct value, guaranteed! 
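 * The transfer below prefers the transport driver's asynchronous
 * ->request() hook; otherwise a raw buffer is assembled and sent with
 * hid_hw_output_report(), falling back to a SET_REPORT control request
 * on -ENOSYS.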
*/ report = field->report; /* use custom SET_REPORT request if possible (asynchronous) */ if (hid->ll_driver->request) return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT); /* fall back to generic raw-output-report */ len = hid_report_len(report); buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!buf) return; hid_output_report(report, buf); /* synchronous output report */ ret = hid_hw_output_report(hid, buf, len); if (ret == -ENOSYS) hid_hw_raw_request(hid, report->id, buf, len, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); kfree(buf); } static int hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct hid_device *hid = input_get_drvdata(dev); struct hid_field *field; int offset; if (type == EV_FF) return input_ff_event(dev, type, code, value); if (type != EV_LED) return -1; if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) { hid_warn(dev, "event field not found\n"); return -1; } hid_set_field(field, offset, value); schedule_work(&hid->led_work); return 0; } static int hidinput_open(struct input_dev *dev) { struct hid_device *hid = input_get_drvdata(dev); return hid_hw_open(hid); } static void hidinput_close(struct input_dev *dev) { struct hid_device *hid = input_get_drvdata(dev); hid_hw_close(hid); } static bool __hidinput_change_resolution_multipliers(struct hid_device *hid, struct hid_report *report, bool use_logical_max) { struct hid_usage *usage; bool update_needed = false; bool get_report_completed = false; int i, j; if (report->maxfield == 0) return false; for (i = 0; i < report->maxfield; i++) { __s32 value = use_logical_max ? report->field[i]->logical_maximum : report->field[i]->logical_minimum; /* There is no good reason for a Resolution * Multiplier to have a count other than 1. * Ignore that case. */ if (report->field[i]->report_count != 1) continue; for (j = 0; j < report->field[i]->maxusage; j++) { usage = &report->field[i]->usage[j]; if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER) continue; /* * If we have more than one feature within this * report we need to fill in the bits from the * others before we can overwrite the ones for the * Resolution Multiplier. * * But if we're not allowed to read from the device, * we just bail. Such a device should not exist * anyway. */ if (!get_report_completed && report->maxfield > 1) { if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS) return update_needed; hid_hw_request(hid, report, HID_REQ_GET_REPORT); hid_hw_wait(hid); get_report_completed = true; } report->field[i]->value[j] = value; update_needed = true; } } return update_needed; } static void hidinput_change_resolution_multipliers(struct hid_device *hid) { struct hid_report_enum *rep_enum; struct hid_report *rep; int ret; rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; list_for_each_entry(rep, &rep_enum->report_list, list) { bool update_needed = __hidinput_change_resolution_multipliers(hid, rep, true); if (update_needed) { ret = __hid_request(hid, rep, HID_REQ_SET_REPORT); if (ret) { __hidinput_change_resolution_multipliers(hid, rep, false); return; } } } /* refresh our structs */ hid_setup_resolution_multiplier(hid); } static void report_features(struct hid_device *hid) { struct hid_driver *drv = hid->driver; struct hid_report_enum *rep_enum; struct hid_report *rep; struct hid_usage *usage; int i, j; rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; list_for_each_entry(rep, &rep_enum->report_list, list) for (i = 0; i < rep->maxfield; i++) { /* Ignore if report count is out of bounds. 
*/ if (rep->field[i]->report_count < 1) continue; for (j = 0; j < rep->field[i]->maxusage; j++) { usage = &rep->field[i]->usage[j]; /* Verify if Battery Strength feature is available */ if (usage->hid == HID_DC_BATTERYSTRENGTH) hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i], false); if (drv->feature_mapping) drv->feature_mapping(hid, rep->field[i], usage); } } } static struct hid_input *hidinput_allocate(struct hid_device *hid, unsigned int application) { struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL); struct input_dev *input_dev = input_allocate_device(); const char *suffix = NULL; size_t suffix_len, name_len; if (!hidinput || !input_dev) goto fail; if ((hid->quirks & HID_QUIRK_INPUT_PER_APP) && hid->maxapplication > 1) { switch (application) { case HID_GD_KEYBOARD: suffix = "Keyboard"; break; case HID_GD_KEYPAD: suffix = "Keypad"; break; case HID_GD_MOUSE: suffix = "Mouse"; break; case HID_DG_PEN: /* * yes, there is an issue here: * DG_PEN -> "Stylus" * DG_STYLUS -> "Pen" * But changing this now means users with config snippets * will have to change it and the test suite will not be happy. */ suffix = "Stylus"; break; case HID_DG_STYLUS: suffix = "Pen"; break; case HID_DG_TOUCHSCREEN: suffix = "Touchscreen"; break; case HID_DG_TOUCHPAD: suffix = "Touchpad"; break; case HID_GD_SYSTEM_CONTROL: suffix = "System Control"; break; case HID_CP_CONSUMER_CONTROL: suffix = "Consumer Control"; break; case HID_GD_WIRELESS_RADIO_CTLS: suffix = "Wireless Radio Control"; break; case HID_GD_SYSTEM_MULTIAXIS: suffix = "System Multi Axis"; break; default: break; } } if (suffix) { name_len = strlen(hid->name); suffix_len = strlen(suffix); if ((name_len < suffix_len) || strcmp(hid->name + name_len - suffix_len, suffix)) { hidinput->name = kasprintf(GFP_KERNEL, "%s %s", hid->name, suffix); if (!hidinput->name) goto fail; } } input_set_drvdata(input_dev, hid); input_dev->event = hidinput_input_event; input_dev->open = hidinput_open; input_dev->close = hidinput_close; input_dev->setkeycode = hidinput_setkeycode; input_dev->getkeycode = hidinput_getkeycode; input_dev->name = hidinput->name ? 
hidinput->name : hid->name; input_dev->phys = hid->phys; input_dev->uniq = hid->uniq; input_dev->id.bustype = hid->bus; input_dev->id.vendor = hid->vendor; input_dev->id.product = hid->product; input_dev->id.version = hid->version; input_dev->dev.parent = &hid->dev; hidinput->input = input_dev; hidinput->application = application; list_add_tail(&hidinput->list, &hid->inputs); INIT_LIST_HEAD(&hidinput->reports); return hidinput; fail: kfree(hidinput); input_free_device(input_dev); hid_err(hid, "Out of memory during hid input probe\n"); return NULL; } static bool hidinput_has_been_populated(struct hid_input *hidinput) { int i; unsigned long r = 0; for (i = 0; i < BITS_TO_LONGS(EV_CNT); i++) r |= hidinput->input->evbit[i]; for (i = 0; i < BITS_TO_LONGS(KEY_CNT); i++) r |= hidinput->input->keybit[i]; for (i = 0; i < BITS_TO_LONGS(REL_CNT); i++) r |= hidinput->input->relbit[i]; for (i = 0; i < BITS_TO_LONGS(ABS_CNT); i++) r |= hidinput->input->absbit[i]; for (i = 0; i < BITS_TO_LONGS(MSC_CNT); i++) r |= hidinput->input->mscbit[i]; for (i = 0; i < BITS_TO_LONGS(LED_CNT); i++) r |= hidinput->input->ledbit[i]; for (i = 0; i < BITS_TO_LONGS(SND_CNT); i++) r |= hidinput->input->sndbit[i]; for (i = 0; i < BITS_TO_LONGS(FF_CNT); i++) r |= hidinput->input->ffbit[i]; for (i = 0; i < BITS_TO_LONGS(SW_CNT); i++) r |= hidinput->input->swbit[i]; return !!r; } static void hidinput_cleanup_hidinput(struct hid_device *hid, struct hid_input *hidinput) { struct hid_report *report; int i, k; list_del(&hidinput->list); input_free_device(hidinput->input); kfree(hidinput->name); for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { if (k == HID_OUTPUT_REPORT && hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) continue; list_for_each_entry(report, &hid->report_enum[k].report_list, list) { for (i = 0; i < report->maxfield; i++) if (report->field[i]->hidinput == hidinput) report->field[i]->hidinput = NULL; } } kfree(hidinput); } static struct hid_input *hidinput_match(struct hid_report *report) { struct hid_device *hid = report->device; struct hid_input *hidinput; list_for_each_entry(hidinput, &hid->inputs, list) { if (hidinput->report && hidinput->report->id == report->id) return hidinput; } return NULL; } static struct hid_input *hidinput_match_application(struct hid_report *report) { struct hid_device *hid = report->device; struct hid_input *hidinput; list_for_each_entry(hidinput, &hid->inputs, list) { if (hidinput->application == report->application) return hidinput; /* * Keep SystemControl and ConsumerControl applications together * with the main keyboard, if present. 
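 * (so media and system hotkeys show up on the keyboard's input node
 * rather than on a separate device)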
*/ if ((report->application == HID_GD_SYSTEM_CONTROL || report->application == HID_CP_CONSUMER_CONTROL) && hidinput->application == HID_GD_KEYBOARD) { return hidinput; } } return NULL; } static inline void hidinput_configure_usages(struct hid_input *hidinput, struct hid_report *report) { int i, j, k; int first_field_index = 0; int slot_collection_index = -1; int prev_collection_index = -1; unsigned int slot_idx = 0; struct hid_field *field; /* * First tag all the fields that are part of a slot, * a slot needs to have one Contact ID in the collection */ for (i = 0; i < report->maxfield; i++) { field = report->field[i]; /* ignore fields without usage */ if (field->maxusage < 1) continue; /* * janitoring when collection_index changes */ if (prev_collection_index != field->usage->collection_index) { prev_collection_index = field->usage->collection_index; first_field_index = i; } /* * if we already found a Contact ID in the collection, * tag and continue to the next. */ if (slot_collection_index == field->usage->collection_index) { field->slot_idx = slot_idx; continue; } /* check if the current field has Contact ID */ for (j = 0; j < field->maxusage; j++) { if (field->usage[j].hid == HID_DG_CONTACTID) { slot_collection_index = field->usage->collection_index; slot_idx++; /* * mark all previous fields and this one in the * current collection to be slotted. */ for (k = first_field_index; k <= i; k++) report->field[k]->slot_idx = slot_idx; break; } } } for (i = 0; i < report->maxfield; i++) for (j = 0; j < report->field[i]->maxusage; j++) hidinput_configure_usage(hidinput, report->field[i], report->field[i]->usage + j, j); } /* * Register the input device; print a message. * Configure the input layer interface * Read all reports and initialize the absolute field values. */ int hidinput_connect(struct hid_device *hid, unsigned int force) { struct hid_driver *drv = hid->driver; struct hid_report *report; struct hid_input *next, *hidinput = NULL; unsigned int application; int i, k; INIT_LIST_HEAD(&hid->inputs); INIT_WORK(&hid->led_work, hidinput_led_worker); hid->status &= ~HID_STAT_DUP_DETECTED; if (!force) { for (i = 0; i < hid->maxcollection; i++) { struct hid_collection *col = &hid->collection[i]; if (col->type == HID_COLLECTION_APPLICATION || col->type == HID_COLLECTION_PHYSICAL) if (IS_INPUT_APPLICATION(col->usage)) break; } if (i == hid->maxcollection) return -1; } report_features(hid); for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { if (k == HID_OUTPUT_REPORT && hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) continue; list_for_each_entry(report, &hid->report_enum[k].report_list, list) { if (!report->maxfield) continue; application = report->application; /* * Find the previous hidinput report attached * to this report id. 
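 * (HID_QUIRK_MULTI_INPUT matches by report id, HID_QUIRK_INPUT_PER_APP
 * by application; when neither finds a match, a new hidinput is
 * allocated below)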
*/ if (hid->quirks & HID_QUIRK_MULTI_INPUT) hidinput = hidinput_match(report); else if (hid->maxapplication > 1 && (hid->quirks & HID_QUIRK_INPUT_PER_APP)) hidinput = hidinput_match_application(report); if (!hidinput) { hidinput = hidinput_allocate(hid, application); if (!hidinput) goto out_unwind; } hidinput_configure_usages(hidinput, report); if (hid->quirks & HID_QUIRK_MULTI_INPUT) hidinput->report = report; list_add_tail(&report->hidinput_list, &hidinput->reports); } } hidinput_change_resolution_multipliers(hid); list_for_each_entry_safe(hidinput, next, &hid->inputs, list) { if (drv->input_configured && drv->input_configured(hid, hidinput)) goto out_unwind; if (!hidinput_has_been_populated(hidinput)) { /* no need to register an input device not populated */ hidinput_cleanup_hidinput(hid, hidinput); continue; } if (input_register_device(hidinput->input)) goto out_unwind; hidinput->registered = true; } if (list_empty(&hid->inputs)) { hid_err(hid, "No inputs registered, leaving\n"); goto out_unwind; } if (hid->status & HID_STAT_DUP_DETECTED) hid_dbg(hid, "Some usages could not be mapped, please use HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE if this is legitimate.\n"); return 0; out_unwind: /* unwind the ones we already registered */ hidinput_disconnect(hid); return -1; } EXPORT_SYMBOL_GPL(hidinput_connect); void hidinput_disconnect(struct hid_device *hid) { struct hid_input *hidinput, *next; hidinput_cleanup_battery(hid); list_for_each_entry_safe(hidinput, next, &hid->inputs, list) { list_del(&hidinput->list); if (hidinput->registered) input_unregister_device(hidinput->input); else input_free_device(hidinput->input); kfree(hidinput->name); kfree(hidinput); } /* led_work is spawned by input_dev callbacks, but doesn't access the * parent input_dev at all. Once all input devices are removed, we * know that led_work will never get restarted, so we can cancel it * synchronously and are safe. */ cancel_work_sync(&hid->led_work); } EXPORT_SYMBOL_GPL(hidinput_disconnect); #ifdef CONFIG_HID_KUNIT_TEST #include "hid-input-test.c" #endif |
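The hidinput_connect()/hidinput_disconnect() pair above is normally reached through hid_hw_start(). Below is a hedged, hypothetical sketch (driver name and VID/PID are placeholders, not from the file) of a minimal HID driver that exercises that path, including the optional ->input_configured callback which hidinput_connect() invokes once per hid_input before input_register_device():

#include <linux/hid.h>
#include <linux/module.h>

static int example_input_configured(struct hid_device *hdev,
				    struct hid_input *hidinput)
{
	/* A non-zero return here makes hidinput_connect() unwind. */
	hid_info(hdev, "input %s configured\n", hidinput->input->name);
	return 0;
}

static int example_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	/* HID_CONNECT_DEFAULT includes HID_CONNECT_HIDINPUT. */
	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static const struct hid_device_id example_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* placeholder VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(hid, example_devices);

static struct hid_driver example_driver = {
	.name			= "hid-example",
	.id_table		= example_devices,
	.probe			= example_probe,
	.input_configured	= example_input_configured,
};
module_hid_driver(example_driver);
MODULE_LICENSE("GPL");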
15 15 200 200 202 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 | // SPDX-License-Identifier: GPL-2.0-only /* Common code for 32 and 64-bit NUMA */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/string.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/ctype.h> #include <linux/nodemask.h> #include <linux/sched.h> #include <linux/topology.h> #include <linux/sort.h> #include <linux/numa_memblks.h> #include <asm/e820/api.h> #include <asm/proto.h> #include <asm/dma.h> #include <asm/numa.h> #include <asm/amd/nb.h> #include "mm_internal.h" int numa_off; static __init int numa_setup(char *opt) { if (!opt) return -EINVAL; if (!strncmp(opt, "off", 3)) numa_off = 1; if (!strncmp(opt, "fake=", 5)) return numa_emu_cmdline(opt + 5); if (!strncmp(opt, "noacpi", 6)) disable_srat(); if (!strncmp(opt, "nohmat", 6)) disable_hmat(); return 0; } early_param("numa", numa_setup); /* * apicid, cpu, node mappings */ s16 __apicid_to_node[MAX_LOCAL_APIC] = { [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; int numa_cpu_node(int cpu) { u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); if (apicid != BAD_APICID) return __apicid_to_node[apicid]; return NUMA_NO_NODE; } cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); /* * Map cpu index to node index */ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); void numa_set_node(int cpu, int node) { int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); /* early setting, no percpu area yet */ if (cpu_to_node_map) { cpu_to_node_map[cpu] = node; return; } #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); dump_stack(); return; } #endif per_cpu(x86_cpu_to_node_map, cpu) = node; set_cpu_numa_node(cpu, node); } void numa_clear_node(int cpu) { numa_set_node(cpu, NUMA_NO_NODE); } /* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * * Note: cpumask_of_node() is not valid until after this is done. * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) */ void __init setup_node_to_cpumask_map(void) { unsigned int node; /* setup nr_node_ids if not done yet */ if (nr_node_ids == MAX_NUMNODES) setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init numa_register_nodes(void) { int nid; if (!memblock_validate_numa_coverage(SZ_1M)) return -EINVAL; /* Finally register nodes. */ for_each_node_mask(nid, node_possible_map) { unsigned long start_pfn, end_pfn; /* * Note, get_pfn_range_for_nid() depends on * memblock_set_node() having already happened */ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (start_pfn >= end_pfn) continue; alloc_node_data(nid); node_set_online(nid); } /* Dump memblock with node info and return. */ memblock_dump_all(); return 0; } /* * There are unfortunately some poorly designed mainboards around that * only connect memory to a single CPU. This breaks the 1:1 cpu->node * mapping. To avoid this fill in the mapping for all possible CPUs, * as the number of CPUs is not known yet. We round robin the existing * nodes. */ static void __init numa_init_array(void) { int rr, i; rr = first_node(node_online_map); for (i = 0; i < nr_cpu_ids; i++) { if (early_cpu_to_node(i) != NUMA_NO_NODE) continue; numa_set_node(i, rr); rr = next_node_in(rr, node_online_map); } } static int __init numa_init(int (*init_func)(void)) { int i; int ret; for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE); ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true); if (ret < 0) return ret; ret = numa_register_nodes(); if (ret < 0) return ret; for (i = 0; i < nr_cpu_ids; i++) { int nid = early_cpu_to_node(i); if (nid == NUMA_NO_NODE) continue; if (!node_online(nid)) numa_clear_node(i); } numa_init_array(); return 0; } /** * dummy_numa_init - Fallback dummy NUMA init * * Used if there's no underlying NUMA architecture, NUMA initialization * fails, or NUMA is disabled on the command line. * * Must online at least one node and add memory blocks that cover all * allowed memory. This function must not fail. */ static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n", 0LLU, PFN_PHYS(max_pfn) - 1); node_set(0, numa_nodes_parsed); numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); return 0; } /** * x86_numa_init - Initialize NUMA * * Try each configured NUMA initialization method until one succeeds. The * last fallback is dummy single node config encompassing whole memory and * never fails. */ void __init x86_numa_init(void) { if (!numa_off) { #ifdef CONFIG_ACPI_NUMA if (!numa_init(x86_acpi_numa_init)) return; #endif #ifdef CONFIG_AMD_NUMA if (!numa_init(amd_numa_init)) return; #endif if (acpi_disabled && !numa_init(of_numa_init)) return; } numa_init(dummy_numa_init); } /* * A node may exist which has one or more Generic Initiators but no CPUs and no * memory. * * This function must be called after init_cpu_to_node(), to ensure that any * memoryless CPU nodes have already been brought online, and before the * node_data[nid] is needed for zone list setup in build_all_zonelists(). * * When this function is called, any nodes containing either memory and/or CPUs * will already be online and there is no need to do anything extra, even if * they also contain one or more Generic Initiators. */ void __init init_gi_nodes(void) { int nid; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ for_each_node_state(nid, N_GENERIC_INITIATOR) if (!node_online(nid)) node_set_online(nid); } /* * Setup early cpu_to_node. * * Populate cpu_to_node[] only if x86_cpu_to_apicid[], * and apicid_to_node[] tables have valid entries for a CPU. * This means we skip cpu_to_node[] initialisation for NUMA * emulation and faking node case (when running a kernel compiled * for NUMA on a non NUMA box), which is OK as cpu_to_node[] * is already initialized in a round robin manner at numa_init_array, * prior to this call, and this initialization is good enough * for the fake NUMA cases. * * Called before the per_cpu areas are setup. */ void __init init_cpu_to_node(void) { int cpu; u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); BUG_ON(cpu_to_apicid == NULL); for_each_possible_cpu(cpu) { int node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE) continue; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ if (!node_online(node)) node_set_online(node); numa_set_node(cpu, node); } } #ifndef CONFIG_DEBUG_PER_CPU_MAPS # ifndef CONFIG_NUMA_EMU void numa_add_cpu(unsigned int cpu) { cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } void numa_remove_cpu(unsigned int cpu) { cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } # endif /* !CONFIG_NUMA_EMU */ #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ int __cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) { printk(KERN_WARNING "cpu_to_node(%d): usage too early!\n", cpu); dump_stack(); return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; } return per_cpu(x86_cpu_to_node_map, cpu); } EXPORT_SYMBOL(__cpu_to_node); /* * Same function as cpu_to_node() but used if called before the * per_cpu areas are setup. 
*/ int early_cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; if (!cpu_possible(cpu)) { printk(KERN_WARNING "early_cpu_to_node(%d): no per_cpu area!\n", cpu); dump_stack(); return NUMA_NO_NODE; } return per_cpu(x86_cpu_to_node_map, cpu); } void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable) { struct cpumask *mask; if (node == NUMA_NO_NODE) { /* early_cpu_to_node() already emits a warning and trace */ return; } mask = node_to_cpumask_map[node]; if (!cpumask_available(mask)) { pr_err("node_to_cpumask_map[%i] NULL\n", node); dump_stack(); return; } if (enable) cpumask_set_cpu(cpu, mask); else cpumask_clear_cpu(cpu, mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n", enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, cpumask_pr_args(mask)); return; } # ifndef CONFIG_NUMA_EMU static void numa_set_cpumask(int cpu, bool enable) { debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); } void numa_add_cpu(unsigned int cpu) { numa_set_cpumask(cpu, true); } void numa_remove_cpu(unsigned int cpu) { numa_set_cpumask(cpu, false); } # endif /* !CONFIG_NUMA_EMU */ /* * Returns a pointer to the bitmask of CPUs on Node 'node'. */ const struct cpumask *cpumask_of_node(int node) { if ((unsigned)node >= nr_node_ids) { printk(KERN_WARNING "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n", node, nr_node_ids); dump_stack(); return cpu_none_mask; } if (!cpumask_available(node_to_cpumask_map[node])) { printk(KERN_WARNING "cpumask_of_node(%d): no node_to_cpumask_map!\n", node); dump_stack(); return cpu_online_mask; } return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ #ifdef CONFIG_NUMA_EMU void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, unsigned int nr_emu_nids) { int i, j; /* * Transform __apicid_to_node table to use emulated nids by * reverse-mapping phys_nid. The maps should always exist but fall * back to zero just in case. */ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { if (__apicid_to_node[i] == NUMA_NO_NODE) continue; for (j = 0; j < nr_emu_nids; j++) if (__apicid_to_node[i] == emu_nid_to_phys[j]) break; __apicid_to_node[i] = j < nr_emu_nids ? j : 0; } } u64 __init numa_emu_dma_end(void) { return PFN_PHYS(MAX_DMA32_PFN); } #endif /* CONFIG_NUMA_EMU */ |
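As the comments above note, cpumask_of_node() only becomes valid after setup_node_to_cpumask_map(). A small hypothetical debug helper, sketched here purely to illustrate the node/cpumask accessors this file provides (the function name is made up):

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/nodemask.h>
#include <linux/printk.h>
#include <linux/topology.h>

static void __init example_dump_numa_topology(void)
{
	int nid;

	/* Walk every online node and print the CPUs mapped to it. */
	for_each_online_node(nid) {
		const struct cpumask *mask = cpumask_of_node(nid);

		pr_info("node %d: cpus %*pbl\n", nid, cpumask_pr_args(mask));
	}
}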
2 2 1 2 3 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 | // SPDX-License-Identifier: GPL-2.0 /* * EZ-USB specific functions used by some of the USB to Serial drivers. * * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/usb/ezusb.h> struct ezusb_fx_type { /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */ unsigned short cpucs_reg; unsigned short max_internal_adress; }; static const struct ezusb_fx_type ezusb_fx1 = { .cpucs_reg = 0x7F92, .max_internal_adress = 0x1B3F, }; /* Commands for writing to memory */ #define WRITE_INT_RAM 0xA0 #define WRITE_EXT_RAM 0xA3 static int ezusb_writememory(struct usb_device *dev, int address, unsigned char *data, int length, __u8 request) { if (!dev) return -ENODEV; return usb_control_msg_send(dev, 0, request, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, address, 0, data, length, 3000, GFP_KERNEL); } static int ezusb_set_reset(struct usb_device *dev, unsigned short cpucs_reg, unsigned char reset_bit) { int response = ezusb_writememory(dev, cpucs_reg, &reset_bit, 1, WRITE_INT_RAM); if (response < 0) dev_err(&dev->dev, "%s-%d failed: %d\n", __func__, reset_bit, response); return response; } int ezusb_fx1_set_reset(struct usb_device *dev, unsigned char reset_bit) { return ezusb_set_reset(dev, ezusb_fx1.cpucs_reg, reset_bit); } EXPORT_SYMBOL_GPL(ezusb_fx1_set_reset); static int ezusb_ihex_firmware_download(struct usb_device *dev, struct ezusb_fx_type fx, const char *firmware_path) { int ret = -ENOENT; const struct firmware *firmware = NULL; const struct ihex_binrec *record; if (request_ihex_firmware(&firmware, firmware_path, &dev->dev)) { dev_err(&dev->dev, "%s - request \"%s\" failed\n", __func__, firmware_path); goto out; } ret = ezusb_set_reset(dev, fx.cpucs_reg, 0); if (ret < 0) goto out; record = (const struct ihex_binrec *)firmware->data; for (; record; record = ihex_next_binrec(record)) { if (be32_to_cpu(record->addr) > fx.max_internal_adress) { ret = ezusb_writememory(dev, be32_to_cpu(record->addr), (unsigned char *)record->data, be16_to_cpu(record->len), WRITE_EXT_RAM); if (ret < 0) { dev_err(&dev->dev, "%s - ezusb_writememory " "failed writing internal memory " "(%d %04X %p %d)\n", __func__, ret, be32_to_cpu(record->addr), record->data, be16_to_cpu(record->len)); goto out; } } } ret = ezusb_set_reset(dev, fx.cpucs_reg, 1); if (ret < 0) goto out; record = (const struct ihex_binrec *)firmware->data; for (; record; record = ihex_next_binrec(record)) { if (be32_to_cpu(record->addr) <= fx.max_internal_adress) { ret = ezusb_writememory(dev, be32_to_cpu(record->addr), (unsigned char *)record->data, be16_to_cpu(record->len), WRITE_INT_RAM); if (ret < 0) { dev_err(&dev->dev, "%s - ezusb_writememory " "failed writing external memory " "(%d %04X %p %d)\n", __func__, ret, be32_to_cpu(record->addr), record->data, be16_to_cpu(record->len)); goto out; } } } ret = ezusb_set_reset(dev, fx.cpucs_reg, 0); out: 
release_firmware(firmware); return ret; } int ezusb_fx1_ihex_firmware_download(struct usb_device *dev, const char *firmware_path) { return ezusb_ihex_firmware_download(dev, ezusb_fx1, firmware_path); } EXPORT_SYMBOL_GPL(ezusb_fx1_ihex_firmware_download); #if 0 /* * Once someone needs these fx2 functions, uncomment them * and add them to ezusb.h and all should be good. */ static struct ezusb_fx_type ezusb_fx2 = { .cpucs_reg = 0xE600, .max_internal_adress = 0x3FFF, }; int ezusb_fx2_set_reset(struct usb_device *dev, unsigned char reset_bit) { return ezusb_set_reset(dev, ezusb_fx2.cpucs_reg, reset_bit); } EXPORT_SYMBOL_GPL(ezusb_fx2_set_reset); int ezusb_fx2_ihex_firmware_download(struct usb_device *dev, const char *firmware_path) { return ezusb_ihex_firmware_download(dev, ezusb_fx2, firmware_path); } EXPORT_SYMBOL_GPL(ezusb_fx2_ihex_firmware_download); #endif MODULE_DESCRIPTION("EZUSB device support"); MODULE_LICENSE("GPL"); |
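A hedged sketch of how a USB-to-serial driver might call the FX1 helper above from its probe path; the function and firmware-file names are placeholders. The helper writes external RAM, holds the 8051 in reset while loading internal RAM, and releases reset last, after which the device typically renumerates with the new firmware:

#include <linux/usb.h>
#include <linux/usb/ezusb.h>

static int example_load_fx1_firmware(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	/* The image must be in the ihex firmware format expected above. */
	ret = ezusb_fx1_ihex_firmware_download(udev, "example/fx1_loader.fw");
	if (ret < 0)
		return ret;

	/* The 8051 now runs the downloaded code; expect a re-enumeration. */
	return 0;
}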
379 379 163 376 21 21 21 21 21 21 21 21 21 21 21 21 11 12 12 11 12 12 7 7 7 7 7 12 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 | // SPDX-License-Identifier: GPL-2.0 /* * Fast batching percpu counters. */ #include <linux/percpu_counter.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/debugobjects.h> #ifdef CONFIG_HOTPLUG_CPU static LIST_HEAD(percpu_counters); static DEFINE_SPINLOCK(percpu_counters_lock); #endif #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER static const struct debug_obj_descr percpu_counter_debug_descr; static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state) { struct percpu_counter *fbc = addr; switch (state) { case ODEBUG_STATE_ACTIVE: percpu_counter_destroy(fbc); debug_object_free(fbc, &percpu_counter_debug_descr); return true; default: return false; } } static const struct debug_obj_descr percpu_counter_debug_descr = { .name = "percpu_counter", .fixup_free = percpu_counter_fixup_free, }; static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { debug_object_init(fbc, &percpu_counter_debug_descr); debug_object_activate(fbc, &percpu_counter_debug_descr); } static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) { debug_object_deactivate(fbc, &percpu_counter_debug_descr); debug_object_free(fbc, &percpu_counter_debug_descr); } #else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */ static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { } static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) { } #endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) { int cpu; unsigned long flags; raw_spin_lock_irqsave(&fbc->lock, flags); for_each_possible_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); *pcount = 0; } fbc->count = amount; raw_spin_unlock_irqrestore(&fbc->lock, flags); } EXPORT_SYMBOL(percpu_counter_set); /* * Add to a counter while respecting batch size. 
* * There are 2 implementations, both dealing with the following problem: * * The decision slow path/fast path and the actual update must be atomic. * Otherwise a call in process context could check the current values and * decide that the fast path can be used. If now an interrupt occurs before * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters), * then the this_cpu_add() that is executed after the interrupt has completed * can produce values larger than "batch" or even overflows. */ #ifdef CONFIG_HAVE_CMPXCHG_LOCAL /* * Safety against interrupts is achieved in 2 ways: * 1. the fast path uses local cmpxchg (note: no lock prefix) * 2. the slow path operates with interrupts disabled */ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { s64 count; unsigned long flags; count = this_cpu_read(*fbc->counters); do { if (unlikely(abs(count + amount) >= batch)) { raw_spin_lock_irqsave(&fbc->lock, flags); /* * Note: by now we might have migrated to another CPU * or the value might have changed. */ count = __this_cpu_read(*fbc->counters); fbc->count += count + amount; __this_cpu_sub(*fbc->counters, count); raw_spin_unlock_irqrestore(&fbc->lock, flags); return; } } while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount)); } #else /* * local_irq_save() is used to make the function irq safe: * - The slow path would be ok as protected by an irq-safe spinlock. * - this_cpu_add would be ok as it is irq-safe by definition. */ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { s64 count; unsigned long flags; local_irq_save(flags); count = __this_cpu_read(*fbc->counters) + amount; if (abs(count) >= batch) { raw_spin_lock(&fbc->lock); fbc->count += count; __this_cpu_sub(*fbc->counters, count - amount); raw_spin_unlock(&fbc->lock); } else { this_cpu_add(*fbc->counters, amount); } local_irq_restore(flags); } #endif EXPORT_SYMBOL(percpu_counter_add_batch); /* * For percpu_counter with a big batch, the devication of its count could * be big, and there is requirement to reduce the deviation, like when the * counter's batch could be runtime decreased to get a better accuracy, * which can be achieved by running this sync function on each CPU. */ void percpu_counter_sync(struct percpu_counter *fbc) { unsigned long flags; s64 count; raw_spin_lock_irqsave(&fbc->lock, flags); count = __this_cpu_read(*fbc->counters); fbc->count += count; __this_cpu_sub(*fbc->counters, count); raw_spin_unlock_irqrestore(&fbc->lock, flags); } EXPORT_SYMBOL(percpu_counter_sync); /* * Add up all the per-cpu counts, return the result. This is a more accurate * but much slower version of percpu_counter_read_positive(). * * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums * from CPUs that are in the process of being taken offline. Dying cpus have * been removed from the online mask, but may not have had the hotplug dead * notifier called to fold the percpu count back into the global counter sum. * By including dying CPUs in the iteration mask, we avoid this race condition * so __percpu_counter_sum() just does the right thing when CPUs are being taken * offline. 
*/ s64 __percpu_counter_sum(struct percpu_counter *fbc) { s64 ret; int cpu; unsigned long flags; raw_spin_lock_irqsave(&fbc->lock, flags); ret = fbc->count; for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; } raw_spin_unlock_irqrestore(&fbc->lock, flags); return ret; } EXPORT_SYMBOL(__percpu_counter_sum); int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp, u32 nr_counters, struct lock_class_key *key) { unsigned long flags __maybe_unused; size_t counter_size; s32 __percpu *counters; u32 i; counter_size = ALIGN(sizeof(*counters), __alignof__(*counters)); counters = __alloc_percpu_gfp(nr_counters * counter_size, __alignof__(*counters), gfp); if (!counters) { fbc[0].counters = NULL; return -ENOMEM; } for (i = 0; i < nr_counters; i++) { raw_spin_lock_init(&fbc[i].lock); lockdep_set_class(&fbc[i].lock, key); #ifdef CONFIG_HOTPLUG_CPU INIT_LIST_HEAD(&fbc[i].list); #endif fbc[i].count = amount; fbc[i].counters = (void __percpu *)counters + i * counter_size; debug_percpu_counter_activate(&fbc[i]); } #ifdef CONFIG_HOTPLUG_CPU spin_lock_irqsave(&percpu_counters_lock, flags); for (i = 0; i < nr_counters; i++) list_add(&fbc[i].list, &percpu_counters); spin_unlock_irqrestore(&percpu_counters_lock, flags); #endif return 0; } EXPORT_SYMBOL(__percpu_counter_init_many); void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters) { unsigned long flags __maybe_unused; u32 i; if (WARN_ON_ONCE(!fbc)) return; if (!fbc[0].counters) return; for (i = 0; i < nr_counters; i++) debug_percpu_counter_deactivate(&fbc[i]); #ifdef CONFIG_HOTPLUG_CPU spin_lock_irqsave(&percpu_counters_lock, flags); for (i = 0; i < nr_counters; i++) list_del(&fbc[i].list); spin_unlock_irqrestore(&percpu_counters_lock, flags); #endif free_percpu(fbc[0].counters); for (i = 0; i < nr_counters; i++) fbc[i].counters = NULL; } EXPORT_SYMBOL(percpu_counter_destroy_many); int percpu_counter_batch __read_mostly = 32; EXPORT_SYMBOL(percpu_counter_batch); static int compute_batch_value(unsigned int cpu) { int nr = num_online_cpus(); percpu_counter_batch = max(32, nr*2); return 0; } static int percpu_counter_cpu_dead(unsigned int cpu) { #ifdef CONFIG_HOTPLUG_CPU struct percpu_counter *fbc; compute_batch_value(cpu); spin_lock_irq(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, list) { s32 *pcount; raw_spin_lock(&fbc->lock); pcount = per_cpu_ptr(fbc->counters, cpu); fbc->count += *pcount; *pcount = 0; raw_spin_unlock(&fbc->lock); } spin_unlock_irq(&percpu_counters_lock); #endif return 0; } /* * Compare counter against given value. * Return 1 if greater, 0 if equal and -1 if less */ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) { s64 count; count = percpu_counter_read(fbc); /* Check to see if rough count will be sufficient for comparison */ if (abs(count - rhs) > (batch * num_online_cpus())) { if (count > rhs) return 1; else return -1; } /* Need to use precise count */ count = percpu_counter_sum(fbc); if (count > rhs) return 1; else if (count < rhs) return -1; else return 0; } EXPORT_SYMBOL(__percpu_counter_compare); /* * Compare counter, and add amount if total is: less than or equal to limit if * amount is positive, or greater than or equal to limit if amount is negative. * Return true if amount is added, or false if total would be beyond the limit. * * Negative limit is allowed, but unusual. 
* When negative amounts (subs) are given to percpu_counter_limited_add(), * the limit would most naturally be 0 - but other limits are also allowed. * * Overflow beyond S64_MAX is not allowed for: counter, limit and amount * are all assumed to be sane (far from S64_MIN and S64_MAX). */ bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount, s32 batch) { s64 count; s64 unknown; unsigned long flags; bool good = false; if (amount == 0) return true; local_irq_save(flags); unknown = batch * num_online_cpus(); count = __this_cpu_read(*fbc->counters); /* Skip taking the lock when safe */ if (abs(count + amount) <= batch && ((amount > 0 && fbc->count + unknown <= limit) || (amount < 0 && fbc->count - unknown >= limit))) { this_cpu_add(*fbc->counters, amount); local_irq_restore(flags); return true; } raw_spin_lock(&fbc->lock); count = fbc->count + amount; /* Skip percpu_counter_sum() when safe */ if (amount > 0) { if (count - unknown > limit) goto out; if (count + unknown <= limit) good = true; } else { if (count + unknown < limit) goto out; if (count - unknown >= limit) good = true; } if (!good) { s32 *pcount; int cpu; for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) { pcount = per_cpu_ptr(fbc->counters, cpu); count += *pcount; } if (amount > 0) { if (count > limit) goto out; } else { if (count < limit) goto out; } good = true; } count = __this_cpu_read(*fbc->counters); fbc->count += count + amount; __this_cpu_sub(*fbc->counters, count); out: raw_spin_unlock(&fbc->lock); local_irq_restore(flags); return good; } static int __init percpu_counter_startup(void) { int ret; ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online", compute_batch_value, NULL); WARN_ON(ret < 0); ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD, "lib/percpu_cnt:dead", NULL, percpu_counter_cpu_dead); WARN_ON(ret < 0); return 0; } module_init(percpu_counter_startup); |
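A hedged usage sketch of the API implemented above (not part of lib/percpu_counter.c); it contrasts the approximate percpu_counter_read() with the exact but slower percpu_counter_sum():

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/printk.h>

static struct percpu_counter example_nr_items;

static int __init example_pcc_init(void)
{
	int ret;

	/* Allocates the per-cpu s32 slots; the shared count starts at 0. */
	ret = percpu_counter_init(&example_nr_items, 0, GFP_KERNEL);
	if (ret)
		return ret;

	percpu_counter_inc(&example_nr_items);		/* fast path */
	percpu_counter_add(&example_nr_items, 41);	/* batched, as above */

	/* read() may be off by up to batch * nr_cpus; sum() folds all CPUs */
	pr_info("approx=%lld exact=%lld\n",
		percpu_counter_read(&example_nr_items),
		percpu_counter_sum(&example_nr_items));

	percpu_counter_destroy(&example_nr_items);
	return 0;
}
module_init(example_pcc_init);
MODULE_LICENSE("GPL");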
1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 | // SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for SiGma Micro-based keyboards * * Copyright (c) 2016 Kinglong Mee * Copyright (c) 2021 Desmond Lim */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static const __u8 sm_0059_rdesc[] = { 0x05, 0x0c, /* Usage Page (Consumer Devices) 0 */ 0x09, 0x01, /* Usage (Consumer Control) 2 */ 0xa1, 0x01, /* Collection (Application) 4 */ 0x85, 0x01, /* Report ID (1) 6 */ 0x19, 0x00, /* Usage Minimum (0) 8 */ 0x2a, 0x3c, 0x02, /* Usage Maximum (572) 10 */ 0x15, 0x00, /* Logical Minimum (0) 13 */ 0x26, 0x3c, 0x02, /* Logical Maximum (572) 15 */ 0x95, 0x01, /* Report Count (1) 18 */ 0x75, 0x10, /* Report Size (16) 20 */ 0x81, 0x00, /* Input (Data,Arr,Abs) 22 */ 0xc0, /* End Collection 24 */ 0x05, 0x01, /* Usage Page (Generic Desktop) 25 */ 0x09, 0x80, /* Usage (System Control) 27 */ 0xa1, 0x01, /* Collection (Application) 29 */ 0x85, 0x02, /* Report ID (2) 31 */ 0x19, 0x81, /* Usage Minimum (129) 33 */ 0x29, 0x83, /* Usage Maximum (131) 35 */ 0x25, 0x01, /* Logical Maximum (1) 37 */ 0x75, 0x01, /* Report Size (1) 39 */ 0x95, 0x03, /* Report Count (3) 41 */ 0x81, 0x02, /* Input (Data,Var,Abs) 43 */ 0x95, 0x05, /* Report Count (5) 45 */ 0x81, 0x01, /* Input (Cnst,Arr,Abs) 47 */ 0xc0, /* End Collection 49 */ 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) 50 */ 0x09, 0x01, /* Usage (Vendor Usage 1) 53 */ 0xa1, 0x01, /* Collection (Application) 55 */ 0x85, 0x03, /* Report ID (3) 57 */ 0x1a, 0xf1, 0x00, /* Usage Minimum (241) 59 */ 0x2a, 0xf8, 0x00, /* Usage Maximum (248) 62 */ 0x15, 0x00, /* Logical Minimum (0) 65 */ 0x25, 0x01, /* Logical Maximum (1) 67 */ 0x75, 0x01, /* Report Size (1) 69 */ 0x95, 0x08, /* Report Count (8) 71 */ 0x81, 0x02, /* Input (Data,Var,Abs) 73 */ 0xc0, /* End Collection 75 */ 0x05, 0x01, /* Usage Page (Generic Desktop) 76 */ 0x09, 0x06, /* Usage (Keyboard) 78 */ 0xa1, 0x01, /* Collection (Application) 80 */ 0x85, 0x04, /* Report ID (4) 82 */ 0x05, 0x07, /* Usage Page (Keyboard) 84 */ 0x19, 0xe0, /* Usage Minimum (224) 86 */ 0x29, 0xe7, /* Usage Maximum (231) 88 */ 0x15, 0x00, /* Logical Minimum (0) 90 */ 0x25, 0x01, /* Logical Maximum (1) 92 */ 0x75, 0x01, /* Report Size (1) 94 */ 0x95, 0x08, /* Report Count (8) 96 */ 0x81, 0x00, /* Input (Data,Arr,Abs) 98 */ 0x95, 0x30, /* Report Count (48) 100 */ 0x75, 0x01, /* Report Size (1) 102 */ 0x15, 0x00, /* Logical Minimum (0) 104 */ 0x25, 0x01, /* Logical Maximum (1) 106 */ 0x05, 0x07, /* Usage Page (Keyboard) 108 */ 0x19, 0x00, /* Usage Minimum (0) 110 */ 0x29, 0x2f, /* Usage Maximum (47) 112 */ 0x81, 0x02, /* Input (Data,Var,Abs) 114 */ 0xc0, /* End Collection 116 */ 0x05, 0x01, /* Usage Page (Generic Desktop) 117 */ 0x09, 0x06, /* Usage (Keyboard) 119 */ 0xa1, 0x01, /* Collection (Application) 121 */ 0x85, 0x05, /* Report ID (5) 123 */ 0x95, 0x38, /* Report Count (56) 125 */ 0x75, 0x01, /* Report Size (1) 127 */ 0x15, 0x00, /* Logical Minimum (0) 129 */ 0x25, 0x01, /* Logical Maximum (1) 131 */ 0x05, 0x07, /* Usage Page (Keyboard) 133 */ 0x19, 0x30, /* Usage Minimum (48) 135 */ 0x29, 0x67, /* Usage Maximum 
(103) 137 */ 0x81, 0x02, /* Input (Data,Var,Abs) 139 */ 0xc0, /* End Collection 141 */ 0x05, 0x01, /* Usage Page (Generic Desktop) 142 */ 0x09, 0x06, /* Usage (Keyboard) 144 */ 0xa1, 0x01, /* Collection (Application) 146 */ 0x85, 0x06, /* Report ID (6) 148 */ 0x95, 0x38, /* Report Count (56) 150 */ 0x75, 0x01, /* Report Size (1) 152 */ 0x15, 0x00, /* Logical Minimum (0) 154 */ 0x25, 0x01, /* Logical Maximum (1) 156 */ 0x05, 0x07, /* Usage Page (Keyboard) 158 */ 0x19, 0x68, /* Usage Minimum (104) 160 */ 0x29, 0x9f, /* Usage Maximum (159) 162 */ 0x81, 0x02, /* Input (Data,Var,Abs) 164 */ 0xc0, /* End Collection 166 */ }; static const __u8 *sm_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == sizeof(sm_0059_rdesc) && !memcmp(sm_0059_rdesc, rdesc, *rsize)) { hid_info(hdev, "Fixing up SiGma Micro report descriptor\n"); rdesc[99] = 0x02; } return rdesc; } static const struct hid_device_id sm_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD2) }, { } }; MODULE_DEVICE_TABLE(hid, sm_devices); static struct hid_driver sm_driver = { .name = "sigmamicro", .id_table = sm_devices, .report_fixup = sm_report_fixup, }; module_hid_driver(sm_driver); MODULE_AUTHOR("Kinglong Mee <kinglongmee@gmail.com>"); MODULE_AUTHOR("Desmond Lim <peckishrine@gmail.com>"); MODULE_DESCRIPTION("SiGma Micro HID driver"); MODULE_LICENSE("GPL"); |
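For reference, the fixup above rewrites the Input item at byte offsets 98/99 of the stock descriptor (0x81 0x00, Input (Data,Arr,Abs)) to 0x81 0x02 (Input (Data,Var,Abs)), so the modifier-key bits of report 4 are parsed as variables. A hypothetical, more defensive variant of sm_report_fixup() is sketched below; the extra byte check is redundant given the full memcmp(), and the function relies on the definitions in the file above:

static const __u8 *sm_report_fixup_checked(struct hid_device *hdev,
					   __u8 *rdesc, unsigned int *rsize)
{
	/* Only patch the exact stock descriptor, and only the expected item. */
	if (*rsize == sizeof(sm_0059_rdesc) &&
	    !memcmp(sm_0059_rdesc, rdesc, *rsize) &&
	    rdesc[98] == 0x81 && rdesc[99] == 0x00) {
		hid_info(hdev, "Fixing up SiGma Micro report descriptor\n");
		rdesc[99] = 0x02;	/* Input (Data,Arr,Abs) -> (Data,Var,Abs) */
	}
	return rdesc;
}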
2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for USB webcams based on Konica chipset. This * chipset is used in Intel YC76 camera. * * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com> * * Based on the usbvideo v4l1 konicawc driver which is: * * Copyright (C) 2002 Simon Evans <spse@secret.org.uk> * * The code for making gspca work with a webcam with 2 isoc endpoints was * taken from the benq gspca subdriver which is: * * Copyright (C) 2009 Jean-Francois Moine (http://moinejf.free.fr) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "konica" #include <linux/input.h> #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Konica chipset USB Camera Driver"); MODULE_LICENSE("GPL"); #define WHITEBAL_REG 0x01 #define BRIGHTNESS_REG 0x02 #define SHARPNESS_REG 0x03 #define CONTRAST_REG 0x04 #define SATURATION_REG 0x05 /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct urb *last_data_urb; u8 snapshot_pressed; }; /* .priv is what goes to register 8 for this mode, known working values: 0x00 -> 176x144, cropped 0x01 -> 176x144, cropped 0x02 -> 176x144, cropped 0x03 -> 176x144, cropped 0x04 -> 176x144, binned 0x05 -> 320x240 0x06 -> 320x240 0x07 -> 160x120, cropped 0x08 -> 160x120, cropped 0x09 -> 160x120, binned (note has 136 lines) 0x0a -> 160x120, binned (note has 136 lines) 0x0b -> 160x120, cropped */ static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 136 * 3 / 2 + 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0x0a}, {176, 144, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0x04}, {320, 240, V4L2_PIX_FMT_KONICA420, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2 + 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0x05}, }; static void sd_isoc_irq(struct urb *urb); static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 1000); if (ret < 0) { pr_err("reg_w err writing %02x to %02x: %d\n", value, index, ret); gspca_dev->usb_err = ret; } } static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x03, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, gspca_dev->usb_buf, 2, 1000); if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; /* * Make sure the buffer is zeroed to avoid uninitialized * values. 
*/ memset(gspca_dev->usb_buf, 0, 2); } } static void konica_stream_on(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, 1, 0x0b); } static void konica_stream_off(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, 0, 0x0b); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = vga_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); gspca_dev->cam.no_urb_create = 1; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { int i; /* * The konica needs a freaking large time to "boot" (approx 6.5 sec.), * and does not want to be bothered while doing so :| * Register 0x10 counts from 1 - 3, with 3 being "ready" */ msleep(6000); for (i = 0; i < 20; i++) { reg_r(gspca_dev, 0, 0x10); if (gspca_dev->usb_buf[0] == 3) break; msleep(100); } reg_w(gspca_dev, 0, 0x0d); return gspca_dev->usb_err; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct urb *urb; int i, n, packet_size; struct usb_host_interface *alt; struct usb_interface *intf; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) { pr_err("Couldn't get altsetting\n"); return -EIO; } if (alt->desc.bNumEndpoints < 2) return -ENODEV; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; reg_w(gspca_dev, n, 0x08); konica_stream_on(gspca_dev); if (gspca_dev->usb_err) return gspca_dev->usb_err; /* create 4 URBs - 2 on endpoint 0x83 and 2 on 0x082 */ #if MAX_NURBS < 4 #error "Not enough URBs in the gspca table" #endif #define SD_NPKT 32 for (n = 0; n < 4; n++) { i = n & 1 ? 0 : 1; packet_size = le16_to_cpu(alt->endpoint[i].desc.wMaxPacketSize); urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL); if (!urb) return -ENOMEM; gspca_dev->urb[n] = urb; urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev, packet_size * SD_NPKT, GFP_KERNEL, &urb->transfer_dma); if (urb->transfer_buffer == NULL) { pr_err("usb_buffer_alloc failed\n"); return -ENOMEM; } urb->dev = gspca_dev->dev; urb->context = gspca_dev; urb->transfer_buffer_length = packet_size * SD_NPKT; urb->pipe = usb_rcvisocpipe(gspca_dev->dev, n & 1 ? 
0x81 : 0x82); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->interval = 1; urb->complete = sd_isoc_irq; urb->number_of_packets = SD_NPKT; for (i = 0; i < SD_NPKT; i++) { urb->iso_frame_desc[i].length = packet_size; urb->iso_frame_desc[i].offset = packet_size * i; } } return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd __maybe_unused = (struct sd *) gspca_dev; konica_stream_off(gspca_dev); #if IS_ENABLED(CONFIG_INPUT) /* Don't keep the button in the pressed state "forever" if it was pressed when streaming is stopped */ if (sd->snapshot_pressed) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); sd->snapshot_pressed = 0; } #endif } /* reception of an URB */ static void sd_isoc_irq(struct urb *urb) { struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; struct sd *sd = (struct sd *) gspca_dev; struct urb *data_urb, *status_urb; u8 *data; int i, st; gspca_dbg(gspca_dev, D_PACK, "sd isoc irq\n"); if (!gspca_dev->streaming) return; if (urb->status != 0) { if (urb->status == -ESHUTDOWN) return; /* disconnection */ #ifdef CONFIG_PM if (gspca_dev->frozen) return; #endif gspca_err(gspca_dev, "urb status: %d\n", urb->status); st = usb_submit_urb(urb, GFP_ATOMIC); if (st < 0) pr_err("resubmit urb error %d\n", st); return; } /* if this is a data URB (ep 0x82), wait */ if (urb->transfer_buffer_length > 32) { sd->last_data_urb = urb; return; } status_urb = urb; data_urb = sd->last_data_urb; sd->last_data_urb = NULL; if (!data_urb || data_urb->start_frame != status_urb->start_frame) { gspca_err(gspca_dev, "lost sync on frames\n"); goto resubmit; } if (data_urb->number_of_packets != status_urb->number_of_packets) { gspca_err(gspca_dev, "no packets does not match, data: %d, status: %d\n", data_urb->number_of_packets, status_urb->number_of_packets); goto resubmit; } for (i = 0; i < status_urb->number_of_packets; i++) { if (data_urb->iso_frame_desc[i].status || status_urb->iso_frame_desc[i].status) { gspca_err(gspca_dev, "pkt %d data-status %d, status-status %d\n", i, data_urb->iso_frame_desc[i].status, status_urb->iso_frame_desc[i].status); gspca_dev->last_packet_type = DISCARD_PACKET; continue; } if (status_urb->iso_frame_desc[i].actual_length != 1) { gspca_err(gspca_dev, "bad status packet length %d\n", status_urb->iso_frame_desc[i].actual_length); gspca_dev->last_packet_type = DISCARD_PACKET; continue; } st = *((u8 *)status_urb->transfer_buffer + status_urb->iso_frame_desc[i].offset); data = (u8 *)data_urb->transfer_buffer + data_urb->iso_frame_desc[i].offset; /* st: 0x80-0xff: frame start with frame number (ie 0-7f) * otherwise: * bit 0 0: keep packet * 1: drop packet (padding data) * * bit 4 0 button not clicked * 1 button clicked * button is used to `take a picture' (in software) */ if (st & 0x80) { gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); } else { #if IS_ENABLED(CONFIG_INPUT) u8 button_state = st & 0x40 ? 
1 : 0; if (sd->snapshot_pressed != button_state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, button_state); input_sync(gspca_dev->input_dev); sd->snapshot_pressed = button_state; } #endif if (st & 0x01) continue; } gspca_frame_add(gspca_dev, INTER_PACKET, data, data_urb->iso_frame_desc[i].actual_length); } resubmit: if (data_urb) { st = usb_submit_urb(data_urb, GFP_ATOMIC); if (st < 0) gspca_err(gspca_dev, "usb_submit_urb(data_urb) ret %d\n", st); } st = usb_submit_urb(status_urb, GFP_ATOMIC); if (st < 0) gspca_err(gspca_dev, "usb_submit_urb(status_urb) ret %d\n", st); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: konica_stream_off(gspca_dev); reg_w(gspca_dev, ctrl->val, BRIGHTNESS_REG); konica_stream_on(gspca_dev); break; case V4L2_CID_CONTRAST: konica_stream_off(gspca_dev); reg_w(gspca_dev, ctrl->val, CONTRAST_REG); konica_stream_on(gspca_dev); break; case V4L2_CID_SATURATION: konica_stream_off(gspca_dev); reg_w(gspca_dev, ctrl->val, SATURATION_REG); konica_stream_on(gspca_dev); break; case V4L2_CID_WHITE_BALANCE_TEMPERATURE: konica_stream_off(gspca_dev); reg_w(gspca_dev, ctrl->val, WHITEBAL_REG); konica_stream_on(gspca_dev); break; case V4L2_CID_SHARPNESS: konica_stream_off(gspca_dev); reg_w(gspca_dev, ctrl->val, SHARPNESS_REG); konica_stream_on(gspca_dev); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 9, 1, 4); /* Needs to be verified */ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 9, 1, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 9, 1, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE, 0, 33, 1, 25); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 9, 1, 4); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, #if IS_ENABLED(CONFIG_INPUT) .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
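The status-byte convention that sd_isoc_irq() above relies on, restated as a small hypothetical decoder (struct and field names are made up; note the driver tests bit 6, 0x40, for the snapshot button):

#include <linux/types.h>

struct konica_status {
	bool frame_start;	/* 0x80..0xff: start of a new frame */
	u8 frame_number;	/* low 7 bits, valid when frame_start is set */
	bool drop_packet;	/* bit 0: padding data, to be discarded */
	bool button_pressed;	/* bit 6 (0x40): snapshot button state */
};

static inline struct konica_status konica_decode_status(u8 st)
{
	struct konica_status s = { };

	if (st & 0x80) {
		s.frame_start = true;
		s.frame_number = st & 0x7f;
	} else {
		s.drop_packet = st & 0x01;
		s.button_pressed = st & 0x40;
	}
	return s;
}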
2 2 1 1 1 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 | // SPDX-License-Identifier: GPL-2.0 /***************************************************************************** * USBLCD Kernel Driver * * Version 1.05 * * (C) 2005 Georges Toth <g.toth@e-biz.lu> * * * * This file is licensed under the GPL. See COPYING in the package. * * Based on usb-skeleton.c 2.0 by Greg Kroah-Hartman (greg@kroah.com) * * * * * * 28.02.05 Complete rewrite of the original usblcd.c driver, * * based on usb_skeleton.c. 
* * This new driver allows more than one USB-LCD to be connected * * and controlled, at once * *****************************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/uaccess.h> #include <linux/usb.h> #define DRIVER_VERSION "USBLCD Driver Version 1.05" #define USBLCD_MINOR 144 #define IOCTL_GET_HARD_VERSION 1 #define IOCTL_GET_DRV_VERSION 2 static const struct usb_device_id id_table[] = { { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); struct usb_lcd { struct usb_device *udev; /* init: probe_lcd */ struct usb_interface *interface; /* the interface for this device */ unsigned char *bulk_in_buffer; /* the buffer to receive data */ size_t bulk_in_size; /* the size of the receive buffer */ __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */ __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */ struct kref kref; struct semaphore limit_sem; /* to stop writes at full throttle from using up all RAM */ struct usb_anchor submitted; /* URBs to wait for before suspend */ struct rw_semaphore io_rwsem; unsigned long disconnected:1; }; #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref) #define USB_LCD_CONCURRENT_WRITES 5 static struct usb_driver lcd_driver; static void lcd_delete(struct kref *kref) { struct usb_lcd *dev = to_lcd_dev(kref); usb_put_dev(dev->udev); kfree(dev->bulk_in_buffer); kfree(dev); } static int lcd_open(struct inode *inode, struct file *file) { struct usb_lcd *dev; struct usb_interface *interface; int subminor, r; subminor = iminor(inode); interface = usb_find_interface(&lcd_driver, subminor); if (!interface) { pr_err("USBLCD: %s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } dev = usb_get_intfdata(interface); /* increment our usage count for the device */ kref_get(&dev->kref); /* grab a power reference */ r = usb_autopm_get_interface(interface); if (r < 0) { kref_put(&dev->kref, lcd_delete); return r; } /* save our object in the file's private structure */ file->private_data = dev; return 0; } static int lcd_release(struct inode *inode, struct file *file) { struct usb_lcd *dev; dev = file->private_data; if (dev == NULL) return -ENODEV; /* decrement the count on our device */ usb_autopm_put_interface(dev->interface); kref_put(&dev->kref, lcd_delete); return 0; } static ssize_t lcd_read(struct file *file, char __user * buffer, size_t count, loff_t *ppos) { struct usb_lcd *dev; int retval = 0; int bytes_read; dev = file->private_data; down_read(&dev->io_rwsem); if (dev->disconnected) { retval = -ENODEV; goto out_up_io; } /* do a blocking bulk read to get data from the device */ retval = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr), dev->bulk_in_buffer, min(dev->bulk_in_size, count), &bytes_read, 10000); /* if the read was successful, copy the data to userspace */ if (!retval) { if (copy_to_user(buffer, dev->bulk_in_buffer, bytes_read)) retval = -EFAULT; else retval = bytes_read; } out_up_io: up_read(&dev->io_rwsem); return retval; } static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct usb_lcd *dev; u16 bcdDevice; char buf[30]; dev = file->private_data; if (dev == NULL) return -ENODEV; switch (cmd) { case IOCTL_GET_HARD_VERSION: bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice); sprintf(buf, 
"%1d%1d.%1d%1d", (bcdDevice & 0xF000)>>12, (bcdDevice & 0xF00)>>8, (bcdDevice & 0xF0)>>4, (bcdDevice & 0xF)); if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0) return -EFAULT; break; case IOCTL_GET_DRV_VERSION: sprintf(buf, DRIVER_VERSION); if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0) return -EFAULT; break; default: return -ENOTTY; } return 0; } static void lcd_write_bulk_callback(struct urb *urb) { struct usb_lcd *dev; int status = urb->status; dev = urb->context; /* sync/async unlink faults aren't errors */ if (status && !(status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN)) { dev_dbg(&dev->interface->dev, "nonzero write bulk status received: %d\n", status); } /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); up(&dev->limit_sem); } static ssize_t lcd_write(struct file *file, const char __user * user_buffer, size_t count, loff_t *ppos) { struct usb_lcd *dev; int retval = 0, r; struct urb *urb = NULL; char *buf = NULL; dev = file->private_data; /* verify that we actually have some data to write */ if (count == 0) goto exit; r = down_interruptible(&dev->limit_sem); if (r < 0) return -EINTR; down_read(&dev->io_rwsem); if (dev->disconnected) { retval = -ENODEV; goto err_up_io; } /* create a urb, and a buffer for it, and copy the data to the urb */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto err_up_io; } buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, &urb->transfer_dma); if (!buf) { retval = -ENOMEM; goto error; } if (copy_from_user(buf, user_buffer, count)) { retval = -EFAULT; goto error; } /* initialize the urb properly */ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr), buf, count, lcd_write_bulk_callback, dev); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->submitted); /* send the data out the bulk port */ retval = usb_submit_urb(urb, GFP_KERNEL); if (retval) { dev_err(&dev->udev->dev, "%s - failed submitting write urb, error %d\n", __func__, retval); goto error_unanchor; } /* release our reference to this urb, the USB core will eventually free it entirely */ usb_free_urb(urb); up_read(&dev->io_rwsem); exit: return count; error_unanchor: usb_unanchor_urb(urb); error: usb_free_coherent(dev->udev, count, buf, urb->transfer_dma); usb_free_urb(urb); err_up_io: up_read(&dev->io_rwsem); up(&dev->limit_sem); return retval; } static const struct file_operations lcd_fops = { .owner = THIS_MODULE, .read = lcd_read, .write = lcd_write, .open = lcd_open, .unlocked_ioctl = lcd_ioctl, .release = lcd_release, .llseek = noop_llseek, }; /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with the driver core */ static struct usb_class_driver lcd_class = { .name = "lcd%d", .fops = &lcd_fops, .minor_base = USBLCD_MINOR, }; static int lcd_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_lcd *dev = NULL; struct usb_endpoint_descriptor *bulk_in, *bulk_out; int i; int retval; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; kref_init(&dev->kref); sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES); init_rwsem(&dev->io_rwsem); init_usb_anchor(&dev->submitted); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) { 
dev_warn(&interface->dev, "USBLCD model not supported.\n"); retval = -ENODEV; goto error; } /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ retval = usb_find_common_endpoints(interface->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); if (retval) { dev_err(&interface->dev, "Could not find both bulk-in and bulk-out endpoints\n"); goto error; } dev->bulk_in_size = usb_endpoint_maxp(bulk_in); dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress; dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL); if (!dev->bulk_in_buffer) { retval = -ENOMEM; goto error; } dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* we can register the device now, as it is ready */ retval = usb_register_dev(interface, &lcd_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); goto error; } i = le16_to_cpu(dev->udev->descriptor.bcdDevice); dev_info(&interface->dev, "USBLCD Version %1d%1d.%1d%1d found " "at address %d\n", (i & 0xF000)>>12, (i & 0xF00)>>8, (i & 0xF0)>>4, (i & 0xF), dev->udev->devnum); /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "USB LCD device now attached to USBLCD-%d\n", interface->minor); return 0; error: kref_put(&dev->kref, lcd_delete); return retval; } static void lcd_draw_down(struct usb_lcd *dev) { int time; time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000); if (!time) usb_kill_anchored_urbs(&dev->submitted); } static int lcd_suspend(struct usb_interface *intf, pm_message_t message) { struct usb_lcd *dev = usb_get_intfdata(intf); if (!dev) return 0; lcd_draw_down(dev); return 0; } static int lcd_resume(struct usb_interface *intf) { return 0; } static void lcd_disconnect(struct usb_interface *interface) { struct usb_lcd *dev = usb_get_intfdata(interface); int minor = interface->minor; /* give back our minor */ usb_deregister_dev(interface, &lcd_class); down_write(&dev->io_rwsem); dev->disconnected = 1; up_write(&dev->io_rwsem); usb_kill_anchored_urbs(&dev->submitted); /* decrement our usage count */ kref_put(&dev->kref, lcd_delete); dev_info(&interface->dev, "USB LCD #%d now disconnected\n", minor); } static struct usb_driver lcd_driver = { .name = "usblcd", .probe = lcd_probe, .disconnect = lcd_disconnect, .suspend = lcd_suspend, .resume = lcd_resume, .id_table = id_table, .supports_autosuspend = 1, }; module_usb_driver(lcd_driver); MODULE_AUTHOR("Georges Toth <g.toth@e-biz.lu>"); MODULE_DESCRIPTION(DRIVER_VERSION); MODULE_LICENSE("GPL"); |
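The char device registered through lcd_class above is the entire userspace interface: read() and write() map onto the bulk endpoints, and the two raw ioctls return version strings. A minimal userspace sketch follows (not part of the driver); it assumes the node appears as /dev/lcd0 and reuses the ioctl numbers defined at the top of the file. Since lcd_ioctl() copies the string without a terminating NUL, the buffer is zeroed before each call.

/* Userspace sketch, hypothetical helper, not part of the kernel driver. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define IOCTL_GET_HARD_VERSION	1	/* mirrors the driver's defines */
#define IOCTL_GET_DRV_VERSION	2

int main(void)
{
	char buf[32];
	const char *msg = "hello";	/* raw bytes handed to lcd_write() */
	int fd = open("/dev/lcd0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open /dev/lcd0");
		return 1;
	}

	memset(buf, 0, sizeof(buf));	/* driver copies strlen() bytes, no NUL */
	if (ioctl(fd, IOCTL_GET_HARD_VERSION, buf) == 0)
		printf("firmware: %s\n", buf);

	memset(buf, 0, sizeof(buf));
	if (ioctl(fd, IOCTL_GET_DRV_VERSION, buf) == 0)
		printf("driver:   %s\n", buf);

	/* each write() becomes one anchored bulk-out URB in lcd_write() */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");

	close(fd);
	return 0;
}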
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for the Prodikeys PC-MIDI Keyboard * providing
midi & extra multimedia keys functionality * * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk> * * Controls for Octave Shift Up/Down, Channel, and * Sustain Duration available via sysfs. */ /* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/hid.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include "hid-ids.h" #define pk_debug(format, arg...) \ pr_debug("hid-prodikeys: " format "\n" , ## arg) #define pk_error(format, arg...) \ pr_err("hid-prodikeys: " format "\n" , ## arg) struct pcmidi_snd; struct pcmidi_sustain { unsigned long in_use; struct pcmidi_snd *pm; struct timer_list timer; unsigned char status; unsigned char note; unsigned char velocity; }; #define PCMIDI_SUSTAINED_MAX 32 struct pcmidi_snd { struct hid_device *hdev; unsigned short ifnum; struct hid_report *pcmidi_report6; struct input_dev *input_ep82; unsigned short midi_mode; unsigned short midi_sustain_mode; unsigned short midi_sustain; unsigned short midi_channel; short midi_octave; struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX]; unsigned short fn_state; unsigned short last_key[24]; spinlock_t rawmidi_in_lock; struct snd_card *card; struct snd_rawmidi *rwmidi; struct snd_rawmidi_substream *in_substream; unsigned long in_triggered; }; #define PK_QUIRK_NOGET 0x00010000 #define PCMIDI_MIDDLE_C 60 #define PCMIDI_CHANNEL_MIN 0 #define PCMIDI_CHANNEL_MAX 15 #define PCMIDI_OCTAVE_MIN (-2) #define PCMIDI_OCTAVE_MAX 2 #define PCMIDI_SUSTAIN_MIN 0 #define PCMIDI_SUSTAIN_MAX 5000 static const char shortname[] = "PC-MIDI"; static const char longname[] = "Prodikeys PC-MIDI Keyboard"; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); module_param_array(id, charp, NULL, 0444); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver"); /* Output routine for the sysfs channel file */ static ssize_t show_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read channel=%u\n", pm->midi_channel); return sprintf(buf, "%u (min:%u, max:%u)\n", pm->midi_channel, PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX); } /* Input routine for the sysfs channel file */ static ssize_t store_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); unsigned channel = 0; if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) { dbg_hid("pcmidi sysfs write channel=%u\n", channel); pm->midi_channel = channel; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel, store_channel); static struct device_attribute *sysfs_device_attr_channel = { &dev_attr_channel, }; /* Output routine for the sysfs sustain file */ static ssize_t show_sustain(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read 
sustain=%u\n", pm->midi_sustain); return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pm->midi_sustain, PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX); } /* Input routine for the sysfs sustain file */ static ssize_t store_sustain(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); unsigned sustain = 0; if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) { dbg_hid("pcmidi sysfs write sustain=%u\n", sustain); pm->midi_sustain = sustain; pm->midi_sustain_mode = (0 == sustain || !pm->midi_mode) ? 0 : 1; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain, store_sustain); static struct device_attribute *sysfs_device_attr_sustain = { &dev_attr_sustain, }; /* Output routine for the sysfs octave file */ static ssize_t show_octave(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read octave=%d\n", pm->midi_octave); return sprintf(buf, "%d (min:%d, max:%d)\n", pm->midi_octave, PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX); } /* Input routine for the sysfs octave file */ static ssize_t store_octave(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct pcmidi_snd *pm = hid_get_drvdata(hdev); int octave = 0; if (sscanf(buf, "%d", &octave) > 0 && octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) { dbg_hid("pcmidi sysfs write octave=%d\n", octave); pm->midi_octave = octave; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave, store_octave); static struct device_attribute *sysfs_device_attr_octave = { &dev_attr_octave, }; static void pcmidi_send_note(struct pcmidi_snd *pm, unsigned char status, unsigned char note, unsigned char velocity) { unsigned long flags; unsigned char buffer[3]; buffer[0] = status; buffer[1] = note; buffer[2] = velocity; spin_lock_irqsave(&pm->rawmidi_in_lock, flags); if (!pm->in_substream) goto drop_note; if (!test_bit(pm->in_substream->number, &pm->in_triggered)) goto drop_note; snd_rawmidi_receive(pm->in_substream, buffer, 3); drop_note: spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags); return; } static void pcmidi_sustained_note_release(struct timer_list *t) { struct pcmidi_sustain *pms = timer_container_of(pms, t, timer); pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity); pms->in_use = 0; } static void init_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 0; pms->pm = pm; timer_setup(&pms->timer, pcmidi_sustained_note_release, 0); } } static void stop_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 1; timer_delete_sync(&pms->timer); } } static int pcmidi_get_output_report(struct pcmidi_snd *pm) { struct hid_device *hdev = pm->hdev; struct hid_report *report; list_for_each_entry(report, &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) { if (!(6 == report->id)) continue; if (report->maxfield < 1) { hid_err(hdev, "output report is empty\n"); break; } if (report->field[0]->report_count != 2) { hid_err(hdev, "field count too low\n"); break; } pm->pcmidi_report6 = 
report; return 0; } /* should never get here */ return -ENODEV; } static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state) { struct hid_device *hdev = pm->hdev; struct hid_report *report = pm->pcmidi_report6; report->field[0]->value[0] = 0x01; report->field[0]->value[1] = state; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); } static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data) { u32 bit_mask; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); /*KEY_MAIL or octave down*/ if (pm->midi_mode && bit_mask == 0x004000) { /* octave down */ pm->midi_octave--; if (pm->midi_octave < -2) pm->midi_octave = -2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); return 1; } /*KEY_WWW or sustain*/ else if (pm->midi_mode && bit_mask == 0x000004) { /* sustain on/off*/ pm->midi_sustain_mode ^= 0x1; return 1; } return 0; /* continue key processing */ } static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size) { struct pcmidi_sustain *pms; unsigned i, j; unsigned char status, note, velocity; unsigned num_notes = (size-1)/2; for (j = 0; j < num_notes; j++) { note = data[j*2+1]; velocity = data[j*2+2]; if (note < 0x81) { /* note on */ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */ note = note - 0x54 + PCMIDI_MIDDLE_C + (pm->midi_octave * 12); if (0 == velocity) velocity = 1; /* force note on */ } else { /* note off */ status = 128 + pm->midi_channel; /* 1000nnnn */ note = note - 0x94 + PCMIDI_MIDDLE_C + (pm->midi_octave*12); if (pm->midi_sustain_mode) { for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; if (!pms->in_use) { pms->status = status; pms->note = note; pms->velocity = velocity; pms->in_use = 1; mod_timer(&pms->timer, jiffies + msecs_to_jiffies(pm->midi_sustain)); return 1; } } } } pcmidi_send_note(pm, status, note, velocity); } return 1; } static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data) { unsigned key; u32 bit_mask; u32 bit_index; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; /* break keys */ for (bit_index = 0; bit_index < 24; bit_index++) { if (!((0x01 << bit_index) & bit_mask)) { input_event(pm->input_ep82, EV_KEY, pm->last_key[bit_index], 0); pm->last_key[bit_index] = 0; } } /* make keys */ for (bit_index = 0; bit_index < 24; bit_index++) { key = 0; switch ((0x01 << bit_index) & bit_mask) { case 0x000010: /* Fn lock*/ pm->fn_state ^= 0x000010; if (pm->fn_state) pcmidi_submit_output_report(pm, 0xc5); else pcmidi_submit_output_report(pm, 0xc6); continue; case 0x020000: /* midi launcher..send a key (qwerty) or not? 
*/ pcmidi_submit_output_report(pm, 0xc1); pm->midi_mode ^= 0x01; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); continue; case 0x100000: /* KEY_MESSENGER or octave up */ dbg_hid("pcmidi mode: %d\n", pm->midi_mode); if (pm->midi_mode) { pm->midi_octave++; if (pm->midi_octave > 2) pm->midi_octave = 2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); continue; } else key = KEY_MESSENGER; break; case 0x400000: key = KEY_CALENDAR; break; case 0x080000: key = KEY_ADDRESSBOOK; break; case 0x040000: key = KEY_DOCUMENTS; break; case 0x800000: key = KEY_WORDPROCESSOR; break; case 0x200000: key = KEY_SPREADSHEET; break; case 0x010000: key = KEY_COFFEE; break; case 0x000100: key = KEY_HELP; break; case 0x000200: key = KEY_SEND; break; case 0x000400: key = KEY_REPLY; break; case 0x000800: key = KEY_FORWARDMAIL; break; case 0x001000: key = KEY_NEW; break; case 0x002000: key = KEY_OPEN; break; case 0x004000: key = KEY_CLOSE; break; case 0x008000: key = KEY_SAVE; break; case 0x000001: key = KEY_UNDO; break; case 0x000002: key = KEY_REDO; break; case 0x000004: key = KEY_SPELLCHECK; break; case 0x000008: key = KEY_PRINT; break; } if (key) { input_event(pm->input_ep82, EV_KEY, key, 1); pm->last_key[bit_index] = key; } } return 1; } static int pcmidi_handle_report( struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size) { int ret = 0; switch (report_id) { case 0x01: /* midi keys (qwerty)*/ ret = pcmidi_handle_report1(pm, data); break; case 0x03: /* midi keyboard (musical)*/ ret = pcmidi_handle_report3(pm, data, size); break; case 0x04: /* multimedia/midi keys (qwerty)*/ ret = pcmidi_handle_report4(pm, data); break; } return ret; } static void pcmidi_setup_extra_keys( struct pcmidi_snd *pm, struct input_dev *input) { /* reassigned functionality for N/A keys MY PICTURES => KEY_WORDPROCESSOR MY MUSIC=> KEY_SPREADSHEET */ static const unsigned int keys[] = { KEY_FN, KEY_MESSENGER, KEY_CALENDAR, KEY_ADDRESSBOOK, KEY_DOCUMENTS, KEY_WORDPROCESSOR, KEY_SPREADSHEET, KEY_COFFEE, KEY_HELP, KEY_SEND, KEY_REPLY, KEY_FORWARDMAIL, KEY_NEW, KEY_OPEN, KEY_CLOSE, KEY_SAVE, KEY_UNDO, KEY_REDO, KEY_SPELLCHECK, KEY_PRINT, 0 }; const unsigned int *pkeys = &keys[0]; unsigned short i; if (pm->ifnum != 1) /* only set up ONCE for interace 1 */ return; pm->input_ep82 = input; for (i = 0; i < 24; i++) pm->last_key[i] = 0; while (*pkeys != 0) { set_bit(*pkeys, pm->input_ep82->keybit); ++pkeys; } } static int pcmidi_set_operational(struct pcmidi_snd *pm) { int rc; if (pm->ifnum != 1) return 0; /* only set up ONCE for interace 1 */ rc = pcmidi_get_output_report(pm); if (rc < 0) return rc; pcmidi_submit_output_report(pm, 0xc1); return 0; } static int pcmidi_snd_free(struct snd_device *dev) { return 0; } static int pcmidi_in_open(struct snd_rawmidi_substream *substream) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in open\n"); pm->in_substream = substream; return 0; } static int pcmidi_in_close(struct snd_rawmidi_substream *substream) { dbg_hid("pcmidi in close\n"); return 0; } static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in trigger %d\n", up); pm->in_triggered = up; } static const struct snd_rawmidi_ops pcmidi_in_ops = { .open = pcmidi_in_open, .close = pcmidi_in_close, .trigger = pcmidi_in_trigger }; static int pcmidi_snd_initialise(struct pcmidi_snd *pm) { static int dev; struct snd_card *card; struct snd_rawmidi *rwmidi; int err; static struct snd_device_ops ops = { 
.dev_free = pcmidi_snd_free, }; if (pm->ifnum != 1) return 0; /* only set up midi device ONCE for interace 1 */ if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } /* Setup sound card */ err = snd_card_new(&pm->hdev->dev, index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { pk_error("failed to create pc-midi sound card\n"); err = -ENOMEM; goto fail; } pm->card = card; /* Setup sound device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops); if (err < 0) { pk_error("failed to create pc-midi sound device: error %d\n", err); goto fail; } strscpy(card->driver, shortname, sizeof(card->driver)); strscpy(card->shortname, shortname, sizeof(card->shortname)); strscpy(card->longname, longname, sizeof(card->longname)); /* Set up rawmidi */ err = snd_rawmidi_new(card, card->shortname, 0, 0, 1, &rwmidi); if (err < 0) { pk_error("failed to create pc-midi rawmidi device: error %d\n", err); goto fail; } pm->rwmidi = rwmidi; strscpy(rwmidi->name, card->shortname, sizeof(rwmidi->name)); rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT; rwmidi->private_data = pm; snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT, &pcmidi_in_ops); /* create sysfs variables */ err = device_create_file(&pm->hdev->dev, sysfs_device_attr_channel); if (err < 0) { pk_error("failed to create sysfs attribute channel: error %d\n", err); goto fail; } err = device_create_file(&pm->hdev->dev, sysfs_device_attr_sustain); if (err < 0) { pk_error("failed to create sysfs attribute sustain: error %d\n", err); goto fail_attr_sustain; } err = device_create_file(&pm->hdev->dev, sysfs_device_attr_octave); if (err < 0) { pk_error("failed to create sysfs attribute octave: error %d\n", err); goto fail_attr_octave; } spin_lock_init(&pm->rawmidi_in_lock); init_sustain_timers(pm); err = pcmidi_set_operational(pm); if (err < 0) { pk_error("failed to find output report\n"); goto fail_register; } /* register it */ err = snd_card_register(card); if (err < 0) { pk_error("failed to register pc-midi sound card: error %d\n", err); goto fail_register; } dbg_hid("pcmidi_snd_initialise finished ok\n"); return 0; fail_register: stop_sustain_timers(pm); device_remove_file(&pm->hdev->dev, sysfs_device_attr_octave); fail_attr_octave: device_remove_file(&pm->hdev->dev, sysfs_device_attr_sustain); fail_attr_sustain: device_remove_file(&pm->hdev->dev, sysfs_device_attr_channel); fail: if (pm->card) { snd_card_free(pm->card); pm->card = NULL; } return err; } static int pcmidi_snd_terminate(struct pcmidi_snd *pm) { if (pm->card) { stop_sustain_timers(pm); device_remove_file(&pm->hdev->dev, sysfs_device_attr_channel); device_remove_file(&pm->hdev->dev, sysfs_device_attr_sustain); device_remove_file(&pm->hdev->dev, sysfs_device_attr_octave); snd_card_disconnect(pm->card); snd_card_free_when_closed(pm->card); } return 0; } /* * PC-MIDI report descriptor for report id is wrong. 
*/ static const __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 178 && rdesc[111] == 0x06 && rdesc[112] == 0x00 && rdesc[113] == 0xff) { hid_info(hdev, "fixing up pc-midi keyboard report descriptor\n"); rdesc[144] = 0x18; /* report 4: was 0x10 report count */ } return rdesc; } static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct pcmidi_snd *pm = hid_get_drvdata(hdev); if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) && 1 == pm->ifnum) { pcmidi_setup_extra_keys(pm, hi->input); return 0; } return 0; } static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct pcmidi_snd *pm = hid_get_drvdata(hdev); int ret = 0; if (1 == pm->ifnum) { if (report->id == data[0]) switch (report->id) { case 0x01: /* midi keys (qwerty)*/ case 0x03: /* midi keyboard (musical)*/ case 0x04: /* extra/midi keys (qwerty)*/ ret = pcmidi_handle_report(pm, report->id, data, size); break; } } return ret; } static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct usb_interface *intf; unsigned short ifnum; unsigned long quirks = id->driver_data; struct pcmidi_snd *pm; if (!hid_is_usb(hdev)) return -EINVAL; intf = to_usb_interface(hdev->dev.parent); ifnum = intf->cur_altsetting->desc.bInterfaceNumber; pm = kzalloc(sizeof(*pm), GFP_KERNEL); if (pm == NULL) { hid_err(hdev, "can't alloc descriptor\n"); return -ENOMEM; } pm->hdev = hdev; pm->ifnum = ifnum; hid_set_drvdata(hdev, pm); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "hid parse failed\n"); goto err_free; } if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */ hdev->quirks |= HID_QUIRK_NOGET; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } ret = pcmidi_snd_initialise(pm); if (ret < 0) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: kfree(pm); return ret; } static void pk_remove(struct hid_device *hdev) { struct pcmidi_snd *pm = hid_get_drvdata(hdev); pcmidi_snd_terminate(pm); hid_hw_stop(hdev); kfree(pm); } static const struct hid_device_id pk_devices[] = { {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI), .driver_data = PK_QUIRK_NOGET}, { } }; MODULE_DEVICE_TABLE(hid, pk_devices); static struct hid_driver pk_driver = { .name = "prodikeys", .id_table = pk_devices, .report_fixup = pk_report_fixup, .input_mapping = pk_input_mapping, .raw_event = pk_raw_event, .probe = pk_probe, .remove = pk_remove, }; module_hid_driver(pk_driver); MODULE_DESCRIPTION("HID driver for the Prodikeys PC-MIDI Keyboard"); MODULE_LICENSE("GPL"); |
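The channel, octave and sustain controls mentioned in the header comment are plain sysfs attributes created in pcmidi_snd_initialise() and parsed with sscanf() in the store_*() hooks. A minimal userspace sketch follows (not part of the driver); the sysfs directory of the HID device is passed on the command line, since the exact path under /sys/bus/hid/devices/ is device-specific.

/* Userspace sketch, hypothetical helper, not part of the kernel driver. */
#include <stdio.h>

static int write_attr(const char *dir, const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);		/* parsed by store_channel()/store_octave()/store_sustain() */
	fclose(f);
	return 0;
}

static void show_attr(const char *dir, const char *name)
{
	char path[256], line[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (f && fgets(line, sizeof(line), f))
		printf("%-8s %s", name, line);	/* e.g. "0 (min:0, max:15)" */
	if (f)
		fclose(f);
}

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <sysfs dir of the HID device>\n", argv[0]);
		return 1;
	}
	write_attr(argv[1], "channel", "2");	/* 0..15 */
	write_attr(argv[1], "octave", "-1");	/* -2..2 */
	write_attr(argv[1], "sustain", "400");	/* ms, 0 disables sustain mode */
	show_attr(argv[1], "channel");
	show_attr(argv[1], "octave");
	show_attr(argv[1], "sustain");
	return 0;
}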
// SPDX-License-Identifier: GPL-2.0 /* * bus.c - bus driver management * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2007 Novell Inc.
* Copyright (c) 2023 Greg Kroah-Hartman <gregkh@linuxfoundation.org> */ #include <linux/async.h> #include <linux/device/bus.h> #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include "base.h" #include "power/power.h" /* /sys/devices/system */ static struct kset *system_kset; /* /sys/bus */ static struct kset *bus_kset; #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) /* * sysfs bindings for drivers */ #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) #define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ struct driver_attribute driver_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) static int __must_check bus_rescan_devices_helper(struct device *dev, void *data); /** * bus_to_subsys - Turn a struct bus_type into a struct subsys_private * * @bus: pointer to the struct bus_type to look up * * The driver core internals needs to work on the subsys_private structure, not * the external struct bus_type pointer. This function walks the list of * registered busses in the system and finds the matching one and returns the * internal struct subsys_private that relates to that bus. * * Note, the reference count of the return value is INCREMENTED if it is not * NULL. A call to subsys_put() must be done when finished with the pointer in * order for it to be properly freed. */ struct subsys_private *bus_to_subsys(const struct bus_type *bus) { struct subsys_private *sp = NULL; struct kobject *kobj; if (!bus || !bus_kset) return NULL; spin_lock(&bus_kset->list_lock); if (list_empty(&bus_kset->list)) goto done; list_for_each_entry(kobj, &bus_kset->list, entry) { struct kset *kset = container_of(kobj, struct kset, kobj); sp = container_of_const(kset, struct subsys_private, subsys); if (sp->bus == bus) goto done; } sp = NULL; done: sp = subsys_get(sp); spin_unlock(&bus_kset->list_lock); return sp; } static const struct bus_type *bus_get(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); if (sp) return bus; return NULL; } static void bus_put(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); /* two puts are required as the call to bus_to_subsys incremented it again */ subsys_put(sp); subsys_put(sp); } static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct driver_attribute *drv_attr = to_drv_attr(attr); struct driver_private *drv_priv = to_driver(kobj); ssize_t ret = -EIO; if (drv_attr->show) ret = drv_attr->show(drv_priv->driver, buf); return ret; } static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct driver_attribute *drv_attr = to_drv_attr(attr); struct driver_private *drv_priv = to_driver(kobj); ssize_t ret = -EIO; if (drv_attr->store) ret = drv_attr->store(drv_priv->driver, buf, count); return ret; } static const struct sysfs_ops driver_sysfs_ops = { .show = drv_attr_show, .store = drv_attr_store, }; static void driver_release(struct kobject *kobj) { struct driver_private *drv_priv = to_driver(kobj); pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__); kfree(drv_priv); } static const struct kobj_type driver_ktype = { .sysfs_ops = &driver_sysfs_ops, .release = driver_release, }; /* * sysfs bindings for buses */ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct 
bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); /* return -EIO for reading a bus attribute without show() */ ssize_t ret = -EIO; if (bus_attr->show) ret = bus_attr->show(subsys_priv->bus, buf); return ret; } static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); /* return -EIO for writing a bus attribute without store() */ ssize_t ret = -EIO; if (bus_attr->store) ret = bus_attr->store(subsys_priv->bus, buf, count); return ret; } static const struct sysfs_ops bus_sysfs_ops = { .show = bus_attr_show, .store = bus_attr_store, }; int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr) { struct subsys_private *sp = bus_to_subsys(bus); int error; if (!sp) return -EINVAL; error = sysfs_create_file(&sp->subsys.kobj, &attr->attr); subsys_put(sp); return error; } EXPORT_SYMBOL_GPL(bus_create_file); void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr) { struct subsys_private *sp = bus_to_subsys(bus); if (!sp) return; sysfs_remove_file(&sp->subsys.kobj, &attr->attr); subsys_put(sp); } EXPORT_SYMBOL_GPL(bus_remove_file); static void bus_release(struct kobject *kobj) { struct subsys_private *priv = to_subsys_private(kobj); lockdep_unregister_key(&priv->lock_key); kfree(priv); } static const struct kobj_type bus_ktype = { .sysfs_ops = &bus_sysfs_ops, .release = bus_release, }; static int bus_uevent_filter(const struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); if (ktype == &bus_ktype) return 1; return 0; } static const struct kset_uevent_ops bus_uevent_ops = { .filter = bus_uevent_filter, }; /* Manually detach a device from its associated driver. */ static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count) { const struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { device_driver_detach(dev); err = count; } put_device(dev); bus_put(bus); return err; } static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store); /* * Manually attach a device to a driver. * Note: the driver must want to bind to the device, * it is not possible to override the driver's id table. 
*/ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count) { const struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && driver_match_device(drv, dev)) { err = device_driver_attach(drv, dev); if (!err) { /* success */ err = count; } } put_device(dev); bus_put(bus); return err; } static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store); static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf) { struct subsys_private *sp = bus_to_subsys(bus); int ret; if (!sp) return -EINVAL; ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe); subsys_put(sp); return ret; } static ssize_t drivers_autoprobe_store(const struct bus_type *bus, const char *buf, size_t count) { struct subsys_private *sp = bus_to_subsys(bus); if (!sp) return -EINVAL; if (buf[0] == '0') sp->drivers_autoprobe = 0; else sp->drivers_autoprobe = 1; subsys_put(sp); return count; } static ssize_t drivers_probe_store(const struct bus_type *bus, const char *buf, size_t count) { struct device *dev; int err = -EINVAL; dev = bus_find_device_by_name(bus, NULL, buf); if (!dev) return -ENODEV; if (bus_rescan_devices_helper(dev, NULL) == 0) err = count; put_device(dev); return err; } static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct device *dev = NULL; struct device_private *dev_prv; if (n) { dev_prv = to_device_private_bus(n); dev = dev_prv->device; } return dev; } /** * bus_for_each_dev - device iterator. * @bus: bus type. * @start: device to start iterating from. * @data: data for the callback. * @fn: function to be called for each device. * * Iterate over @bus's list of devices, and call @fn for each, * passing it @data. If @start is not NULL, we use that device to * begin iterating from. * * We check the return of @fn each time. If it returns anything * other than 0, we break out and return that value. * * NOTE: The device that returns a non-zero value is not retained * in any way, nor is its refcount incremented. If the caller needs * to retain this data, it should do so, and increment the reference * count in the supplied callback. */ int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data, device_iter_t fn) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; struct device *dev; int error = 0; if (!sp) return -EINVAL; klist_iter_init_node(&sp->klist_devices, &i, (start ? &start->p->knode_bus : NULL)); while (!error && (dev = next_device(&i))) error = fn(dev, data); klist_iter_exit(&i); subsys_put(sp); return error; } EXPORT_SYMBOL_GPL(bus_for_each_dev); /** * bus_find_device - device iterator for locating a particular device. * @bus: bus type * @start: Device to begin with * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the bus_for_each_dev() function above, but it * returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. If the callback returns non-zero, this function will * return to the caller and not iterate over any more devices. 
*/ struct device *bus_find_device(const struct bus_type *bus, struct device *start, const void *data, device_match_t match) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; struct device *dev; if (!sp) return NULL; klist_iter_init_node(&sp->klist_devices, &i, (start ? &start->p->knode_bus : NULL)); while ((dev = next_device(&i))) { if (match(dev, data)) { get_device(dev); break; } } klist_iter_exit(&i); subsys_put(sp); return dev; } EXPORT_SYMBOL_GPL(bus_find_device); static struct device_driver *next_driver(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct driver_private *drv_priv; if (n) { drv_priv = container_of(n, struct driver_private, knode_bus); return drv_priv->driver; } return NULL; } /** * bus_for_each_drv - driver iterator * @bus: bus we're dealing with. * @start: driver to start iterating on. * @data: data to pass to the callback. * @fn: function to call for each driver. * * This is nearly identical to the device iterator above. * We iterate over each driver that belongs to @bus, and call * @fn for each. If @fn returns anything but 0, we break out * and return it. If @start is not NULL, we use it as the head * of the list. * * NOTE: we don't return the driver that returns a non-zero * value, nor do we leave the reference count incremented for that * driver. If the caller needs to know that info, it must set it * in the callback. It must also be sure to increment the refcount * so it doesn't disappear before returning to the caller. */ int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start, void *data, int (*fn)(struct device_driver *, void *)) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; struct device_driver *drv; int error = 0; if (!sp) return -EINVAL; klist_iter_init_node(&sp->klist_drivers, &i, start ? &start->p->knode_bus : NULL); while ((drv = next_driver(&i)) && !error) error = fn(drv, data); klist_iter_exit(&i); subsys_put(sp); return error; } EXPORT_SYMBOL_GPL(bus_for_each_drv); /** * bus_add_device - add device to bus * @dev: device being added * * - Add device's bus attributes. * - Create links to device's bus. * - Add the device to its bus's list of devices. */ int bus_add_device(struct device *dev) { struct subsys_private *sp = bus_to_subsys(dev->bus); int error; if (!sp) { /* * This is a normal operation for many devices that do not * have a bus assigned to them, just say that all went * well. */ return 0; } /* * Reference in sp is now incremented and will be dropped when * the device is removed from the bus */ pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev)); error = device_add_groups(dev, sp->bus->dev_groups); if (error) goto out_put; error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev)); if (error) goto out_groups; error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); if (error) goto out_subsys; klist_add_tail(&dev->p->knode_bus, &sp->klist_devices); return 0; out_subsys: sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); out_groups: device_remove_groups(dev, sp->bus->dev_groups); out_put: subsys_put(sp); return error; } /** * bus_probe_device - probe drivers for a new device * @dev: device to probe * * - Automatically probe for a driver if the bus allows it. 
*/ void bus_probe_device(struct device *dev) { struct subsys_private *sp = bus_to_subsys(dev->bus); struct subsys_interface *sif; if (!sp) return; if (sp->drivers_autoprobe) device_initial_probe(dev); mutex_lock(&sp->mutex); list_for_each_entry(sif, &sp->interfaces, node) if (sif->add_dev) sif->add_dev(dev, sif); mutex_unlock(&sp->mutex); subsys_put(sp); } /** * bus_remove_device - remove device from bus * @dev: device to be removed * * - Remove device from all interfaces. * - Remove symlink from bus' directory. * - Delete device from bus's list. * - Detach from its driver. * - Drop reference taken in bus_add_device(). */ void bus_remove_device(struct device *dev) { struct subsys_private *sp = bus_to_subsys(dev->bus); struct subsys_interface *sif; if (!sp) return; mutex_lock(&sp->mutex); list_for_each_entry(sif, &sp->interfaces, node) if (sif->remove_dev) sif->remove_dev(dev, sif); mutex_unlock(&sp->mutex); sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); device_remove_groups(dev, dev->bus->dev_groups); if (klist_node_attached(&dev->p->knode_bus)) klist_del(&dev->p->knode_bus); pr_debug("bus: '%s': remove device %s\n", dev->bus->name, dev_name(dev)); device_release_driver(dev); /* * Decrement the reference count twice, once for the bus_to_subsys() * call in the start of this function, and the second one from the * reference increment in bus_add_device() */ subsys_put(sp); subsys_put(sp); } static int __must_check add_bind_files(struct device_driver *drv) { int ret; ret = driver_create_file(drv, &driver_attr_unbind); if (ret == 0) { ret = driver_create_file(drv, &driver_attr_bind); if (ret) driver_remove_file(drv, &driver_attr_unbind); } return ret; } static void remove_bind_files(struct device_driver *drv) { driver_remove_file(drv, &driver_attr_bind); driver_remove_file(drv, &driver_attr_unbind); } static BUS_ATTR_WO(drivers_probe); static BUS_ATTR_RW(drivers_autoprobe); static int add_probe_files(const struct bus_type *bus) { int retval; retval = bus_create_file(bus, &bus_attr_drivers_probe); if (retval) goto out; retval = bus_create_file(bus, &bus_attr_drivers_autoprobe); if (retval) bus_remove_file(bus, &bus_attr_drivers_probe); out: return retval; } static void remove_probe_files(const struct bus_type *bus) { bus_remove_file(bus, &bus_attr_drivers_autoprobe); bus_remove_file(bus, &bus_attr_drivers_probe); } static ssize_t uevent_store(struct device_driver *drv, const char *buf, size_t count) { int rc; rc = kobject_synth_uevent(&drv->p->kobj, buf, count); return rc ? rc : count; } static DRIVER_ATTR_WO(uevent); /** * bus_add_driver - Add a driver to the bus. * @drv: driver. 
*/ int bus_add_driver(struct device_driver *drv) { struct subsys_private *sp = bus_to_subsys(drv->bus); struct driver_private *priv; int error = 0; if (!sp) return -EINVAL; /* * Reference in sp is now incremented and will be dropped when * the driver is removed from the bus */ pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto out_put_bus; } klist_init(&priv->klist_devices, NULL, NULL); priv->driver = drv; drv->p = priv; priv->kobj.kset = sp->drivers_kset; error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL, "%s", drv->name); if (error) goto out_unregister; klist_add_tail(&priv->knode_bus, &sp->klist_drivers); if (sp->drivers_autoprobe) { error = driver_attach(drv); if (error) goto out_del_list; } error = module_add_driver(drv->owner, drv); if (error) { printk(KERN_ERR "%s: failed to create module links for %s\n", __func__, drv->name); goto out_detach; } error = driver_create_file(drv, &driver_attr_uevent); if (error) { printk(KERN_ERR "%s: uevent attr (%s) failed\n", __func__, drv->name); } error = driver_add_groups(drv, sp->bus->drv_groups); if (error) { /* How the hell do we get out of this pickle? Give up */ printk(KERN_ERR "%s: driver_add_groups(%s) failed\n", __func__, drv->name); } if (!drv->suppress_bind_attrs) { error = add_bind_files(drv); if (error) { /* Ditto */ printk(KERN_ERR "%s: add_bind_files(%s) failed\n", __func__, drv->name); } } return 0; out_detach: driver_detach(drv); out_del_list: klist_del(&priv->knode_bus); out_unregister: kobject_put(&priv->kobj); /* drv->p is freed in driver_release() */ drv->p = NULL; out_put_bus: subsys_put(sp); return error; } /** * bus_remove_driver - delete driver from bus's knowledge. * @drv: driver. * * Detach the driver from the devices it controls, and remove * it from its bus's list of drivers. Finally, we drop the reference * to the bus we took in bus_add_driver(). */ void bus_remove_driver(struct device_driver *drv) { struct subsys_private *sp = bus_to_subsys(drv->bus); if (!sp) return; pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name); if (!drv->suppress_bind_attrs) remove_bind_files(drv); driver_remove_groups(drv, sp->bus->drv_groups); driver_remove_file(drv, &driver_attr_uevent); klist_remove(&drv->p->knode_bus); driver_detach(drv); module_remove_driver(drv); kobject_put(&drv->p->kobj); /* * Decrement the reference count twice, once for the bus_to_subsys() * call in the start of this function, and the second one from the * reference increment in bus_add_driver() */ subsys_put(sp); subsys_put(sp); } /* Helper for bus_rescan_devices's iter */ static int __must_check bus_rescan_devices_helper(struct device *dev, void *data) { int ret = 0; if (!dev->driver) { if (dev->parent && dev->bus->need_parent_lock) device_lock(dev->parent); ret = device_attach(dev); if (dev->parent && dev->bus->need_parent_lock) device_unlock(dev->parent); } return ret < 0 ? ret : 0; } /** * bus_rescan_devices - rescan devices on the bus for possible drivers * @bus: the bus to scan. * * This function will look for devices on the bus with no driver * attached and rescan it against existing drivers to see if it matches * any by calling device_attach() for the unbound devices. 
*/ int bus_rescan_devices(const struct bus_type *bus) { return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper); } EXPORT_SYMBOL_GPL(bus_rescan_devices); /** * device_reprobe - remove driver for a device and probe for a new driver * @dev: the device to reprobe * * This function detaches the attached driver (if any) for the given * device and restarts the driver probing process. It is intended * to use if probing criteria changed during a devices lifetime and * driver attachment should change accordingly. */ int device_reprobe(struct device *dev) { if (dev->driver) device_driver_detach(dev); return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); static void klist_devices_get(struct klist_node *n) { struct device_private *dev_prv = to_device_private_bus(n); struct device *dev = dev_prv->device; get_device(dev); } static void klist_devices_put(struct klist_node *n) { struct device_private *dev_prv = to_device_private_bus(n); struct device *dev = dev_prv->device; put_device(dev); } static ssize_t bus_uevent_store(const struct bus_type *bus, const char *buf, size_t count) { struct subsys_private *sp = bus_to_subsys(bus); int ret; if (!sp) return -EINVAL; ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count); subsys_put(sp); if (ret) return ret; return count; } /* * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() * here, but can not use it as earlier in the file we have * DEVICE_ATTR_WO(uevent), which would cause a clash with the with the store * function name. */ static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL, bus_uevent_store); /** * bus_register - register a driver-core subsystem * @bus: bus to register * * Once we have that, we register the bus with the kobject * infrastructure, then register the children subsystems it has: * the devices and drivers that belong to the subsystem. 
*/ int bus_register(const struct bus_type *bus) { int retval; struct subsys_private *priv; struct kobject *bus_kobj; struct lock_class_key *key; priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); if (!priv) return -ENOMEM; priv->bus = bus; BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier); bus_kobj = &priv->subsys.kobj; retval = kobject_set_name(bus_kobj, "%s", bus->name); if (retval) goto out; bus_kobj->kset = bus_kset; bus_kobj->ktype = &bus_ktype; priv->drivers_autoprobe = 1; retval = kset_register(&priv->subsys); if (retval) goto out; retval = bus_create_file(bus, &bus_attr_uevent); if (retval) goto bus_uevent_fail; priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj); if (!priv->devices_kset) { retval = -ENOMEM; goto bus_devices_fail; } priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj); if (!priv->drivers_kset) { retval = -ENOMEM; goto bus_drivers_fail; } INIT_LIST_HEAD(&priv->interfaces); key = &priv->lock_key; lockdep_register_key(key); __mutex_init(&priv->mutex, "subsys mutex", key); klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put); klist_init(&priv->klist_drivers, NULL, NULL); retval = add_probe_files(bus); if (retval) goto bus_probe_files_fail; retval = sysfs_create_groups(bus_kobj, bus->bus_groups); if (retval) goto bus_groups_fail; pr_debug("bus: '%s': registered\n", bus->name); return 0; bus_groups_fail: remove_probe_files(bus); bus_probe_files_fail: kset_unregister(priv->drivers_kset); bus_drivers_fail: kset_unregister(priv->devices_kset); bus_devices_fail: bus_remove_file(bus, &bus_attr_uevent); bus_uevent_fail: kset_unregister(&priv->subsys); /* Above kset_unregister() will kfree @priv */ priv = NULL; out: kfree(priv); return retval; } EXPORT_SYMBOL_GPL(bus_register); /** * bus_unregister - remove a bus from the system * @bus: bus. * * Unregister the child subsystems and the bus itself. 
* Finally, we call bus_put() to release the refcount */ void bus_unregister(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kobject *bus_kobj; if (!sp) return; pr_debug("bus: '%s': unregistering\n", bus->name); if (sp->dev_root) device_unregister(sp->dev_root); bus_kobj = &sp->subsys.kobj; sysfs_remove_groups(bus_kobj, bus->bus_groups); remove_probe_files(bus); bus_remove_file(bus, &bus_attr_uevent); kset_unregister(sp->drivers_kset); kset_unregister(sp->devices_kset); kset_unregister(&sp->subsys); subsys_put(sp); } EXPORT_SYMBOL_GPL(bus_unregister); int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb) { struct subsys_private *sp = bus_to_subsys(bus); int retval; if (!sp) return -EINVAL; retval = blocking_notifier_chain_register(&sp->bus_notifier, nb); subsys_put(sp); return retval; } EXPORT_SYMBOL_GPL(bus_register_notifier); int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb) { struct subsys_private *sp = bus_to_subsys(bus); int retval; if (!sp) return -EINVAL; retval = blocking_notifier_chain_unregister(&sp->bus_notifier, nb); subsys_put(sp); return retval; } EXPORT_SYMBOL_GPL(bus_unregister_notifier); void bus_notify(struct device *dev, enum bus_notifier_event value) { struct subsys_private *sp = bus_to_subsys(dev->bus); if (!sp) return; blocking_notifier_call_chain(&sp->bus_notifier, value, dev); subsys_put(sp); } struct kset *bus_get_kset(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kset *kset; if (!sp) return NULL; kset = &sp->subsys; subsys_put(sp); return kset; } EXPORT_SYMBOL_GPL(bus_get_kset); /* * Yes, this forcibly breaks the klist abstraction temporarily. It * just wants to sort the klist, not change reference counts and * take/drop locks rapidly in the process. It does all this while * holding the lock for the list, so objects can't otherwise be * added/removed while we're swizzling. */ static void device_insertion_sort_klist(struct device *a, struct list_head *list, int (*compare)(const struct device *a, const struct device *b)) { struct klist_node *n; struct device_private *dev_prv; struct device *b; list_for_each_entry(n, list, n_node) { dev_prv = to_device_private_bus(n); b = dev_prv->device; if (compare(a, b) <= 0) { list_move_tail(&a->p->knode_bus.n_node, &b->p->knode_bus.n_node); return; } } list_move_tail(&a->p->knode_bus.n_node, list); } void bus_sort_breadthfirst(const struct bus_type *bus, int (*compare)(const struct device *a, const struct device *b)) { struct subsys_private *sp = bus_to_subsys(bus); LIST_HEAD(sorted_devices); struct klist_node *n, *tmp; struct device_private *dev_prv; struct device *dev; struct klist *device_klist; if (!sp) return; device_klist = &sp->klist_devices; spin_lock(&device_klist->k_lock); list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) { dev_prv = to_device_private_bus(n); dev = dev_prv->device; device_insertion_sort_klist(dev, &sorted_devices, compare); } list_splice(&sorted_devices, &device_klist->k_list); spin_unlock(&device_klist->k_lock); subsys_put(sp); } EXPORT_SYMBOL_GPL(bus_sort_breadthfirst); struct subsys_dev_iter { struct klist_iter ki; const struct device_type *type; }; /** * subsys_dev_iter_init - initialize subsys device iterator * @iter: subsys iterator to initialize * @sp: the subsys private (i.e. 
bus) we wanna iterate over * @start: the device to start iterating from, if any * @type: device_type of the devices to iterate over, NULL for all * * Initialize subsys iterator @iter such that it iterates over devices * of @subsys. If @start is set, the list iteration will start there, * otherwise if it is NULL, the iteration starts at the beginning of * the list. */ static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp, struct device *start, const struct device_type *type) { struct klist_node *start_knode = NULL; if (start) start_knode = &start->p->knode_bus; klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode); iter->type = type; } /** * subsys_dev_iter_next - iterate to the next device * @iter: subsys iterator to proceed * * Proceed @iter to the next device and return it. Returns NULL if * iteration is complete. * * The returned device is referenced and won't be released till * iterator is proceed to the next device or exited. The caller is * free to do whatever it wants to do with the device including * calling back into subsys code. */ static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) { struct klist_node *knode; struct device *dev; for (;;) { knode = klist_next(&iter->ki); if (!knode) return NULL; dev = to_device_private_bus(knode)->device; if (!iter->type || iter->type == dev->type) return dev; } } /** * subsys_dev_iter_exit - finish iteration * @iter: subsys iterator to finish * * Finish an iteration. Always call this function after iteration is * complete whether the iteration ran till the end or not. */ static void subsys_dev_iter_exit(struct subsys_dev_iter *iter) { klist_iter_exit(&iter->ki); } int subsys_interface_register(struct subsys_interface *sif) { struct subsys_private *sp; struct subsys_dev_iter iter; struct device *dev; if (!sif || !sif->subsys) return -ENODEV; sp = bus_to_subsys(sif->subsys); if (!sp) return -EINVAL; /* * Reference in sp is now incremented and will be dropped when * the interface is removed from the bus */ mutex_lock(&sp->mutex); list_add_tail(&sif->node, &sp->interfaces); if (sif->add_dev) { subsys_dev_iter_init(&iter, sp, NULL, NULL); while ((dev = subsys_dev_iter_next(&iter))) sif->add_dev(dev, sif); subsys_dev_iter_exit(&iter); } mutex_unlock(&sp->mutex); return 0; } EXPORT_SYMBOL_GPL(subsys_interface_register); void subsys_interface_unregister(struct subsys_interface *sif) { struct subsys_private *sp; struct subsys_dev_iter iter; struct device *dev; if (!sif || !sif->subsys) return; sp = bus_to_subsys(sif->subsys); if (!sp) return; mutex_lock(&sp->mutex); list_del_init(&sif->node); if (sif->remove_dev) { subsys_dev_iter_init(&iter, sp, NULL, NULL); while ((dev = subsys_dev_iter_next(&iter))) sif->remove_dev(dev, sif); subsys_dev_iter_exit(&iter); } mutex_unlock(&sp->mutex); /* * Decrement the reference count twice, once for the bus_to_subsys() * call in the start of this function, and the second one from the * reference increment in subsys_interface_register() */ subsys_put(sp); subsys_put(sp); } EXPORT_SYMBOL_GPL(subsys_interface_unregister); static void system_root_device_release(struct device *dev) { kfree(dev); } static int subsys_register(const struct bus_type *subsys, const struct attribute_group **groups, struct kobject *parent_of_root) { struct subsys_private *sp; struct device *dev; int err; err = bus_register(subsys); if (err < 0) return err; sp = bus_to_subsys(subsys); if (!sp) { err = -EINVAL; goto err_sp; } dev = kzalloc(sizeof(struct device), GFP_KERNEL); if 
(!dev) { err = -ENOMEM; goto err_dev; } err = dev_set_name(dev, "%s", subsys->name); if (err < 0) goto err_name; dev->kobj.parent = parent_of_root; dev->groups = groups; dev->release = system_root_device_release; err = device_register(dev); if (err < 0) goto err_dev_reg; sp->dev_root = dev; subsys_put(sp); return 0; err_dev_reg: put_device(dev); dev = NULL; err_name: kfree(dev); err_dev: subsys_put(sp); err_sp: bus_unregister(subsys); return err; } /** * subsys_system_register - register a subsystem at /sys/devices/system/ * @subsys: system subsystem * @groups: default attributes for the root device * * All 'system' subsystems have a /sys/devices/system/<name> root device * with the name of the subsystem. The root device can carry subsystem- * wide attributes. All registered devices are below this single root * device and are named after the subsystem with a simple enumeration * number appended. The registered devices are not explicitly named; * only 'id' in the device needs to be set. * * Do not use this interface for anything new, it exists for compatibility * with bad ideas only. New subsystems should use plain subsystems; and * add the subsystem-wide attributes should be added to the subsystem * directory itself and not some create fake root-device placed in * /sys/devices/system/<name>. */ int subsys_system_register(const struct bus_type *subsys, const struct attribute_group **groups) { return subsys_register(subsys, groups, &system_kset->kobj); } EXPORT_SYMBOL_GPL(subsys_system_register); /** * subsys_virtual_register - register a subsystem at /sys/devices/virtual/ * @subsys: virtual subsystem * @groups: default attributes for the root device * * All 'virtual' subsystems have a /sys/devices/system/<name> root device * with the name of the subsystem. The root device can carry subsystem-wide * attributes. All registered devices are below this single root device. * There's no restriction on device naming. This is for kernel software * constructs which need sysfs interface. */ int subsys_virtual_register(const struct bus_type *subsys, const struct attribute_group **groups) { struct kobject *virtual_dir; virtual_dir = virtual_device_parent(); if (!virtual_dir) return -ENOMEM; return subsys_register(subsys, groups, virtual_dir); } EXPORT_SYMBOL_GPL(subsys_virtual_register); /** * driver_find - locate driver on a bus by its name. * @name: name of the driver. * @bus: bus to scan for the driver. * * Call kset_find_obj() to iterate over list of drivers on * a bus to find driver by name. Return driver if found. * * This routine provides no locking to prevent the driver it returns * from being unregistered or unloaded while the caller is using it. * The caller is responsible for preventing this. */ struct device_driver *driver_find(const char *name, const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kobject *k; struct driver_private *priv; if (!sp) return NULL; k = kset_find_obj(sp->drivers_kset, name); subsys_put(sp); if (!k) return NULL; priv = to_driver(k); /* Drop reference added by kset_find_obj() */ kobject_put(k); return priv->driver; } EXPORT_SYMBOL_GPL(driver_find); /* * Warning, the value could go to "removed" instantly after calling this function, so be very * careful when calling it... 
 */
bool bus_is_registered(const struct bus_type *bus)
{
	struct subsys_private *sp = bus_to_subsys(bus);
	bool is_initialized = false;

	if (sp) {
		is_initialized = true;
		subsys_put(sp);
	}
	return is_initialized;
}

/**
 * bus_get_dev_root - return a pointer to the "device root" of a bus
 * @bus: bus to return the device root of.
 *
 * If a bus has a "device root" structure, return it, WITH THE REFERENCE
 * COUNT INCREMENTED.
 *
 * Note, when finished with the device, a call to put_device() is required.
 *
 * If the device root is not present (or bus is not a valid pointer), NULL
 * will be returned.
 */
struct device *bus_get_dev_root(const struct bus_type *bus)
{
	struct subsys_private *sp = bus_to_subsys(bus);
	struct device *dev_root;

	if (!sp)
		return NULL;

	dev_root = get_device(sp->dev_root);
	subsys_put(sp);
	return dev_root;
}
EXPORT_SYMBOL_GPL(bus_get_dev_root);

int __init buses_init(void)
{
	bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
	if (!bus_kset)
		return -ENOMEM;

	system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
	if (!system_kset) {
		/* Do error handling here as devices_init() does */
		kset_unregister(bus_kset);
		bus_kset = NULL;
		pr_err("%s: failed to create and add kset 'bus'\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
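The registration path above (bus_register(), the bus notifier chain, bus_unregister()) is easiest to see from the caller's side. Below is a minimal, self-contained sketch of a client module; the "widget" bus name, the notifier body, and the module boilerplate are illustrative assumptions, not anything defined in this file.

/*
 * Hypothetical "widget" bus: registers itself and listens for device
 * add events via the notifier chain that bus_register() sets up.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/notifier.h>

static const struct bus_type widget_bus = {
	.name = "widget",		/* would appear as /sys/bus/widget */
};

static int widget_bus_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		dev_info(dev, "added to widget bus\n");
	return NOTIFY_OK;
}

static struct notifier_block widget_nb = {
	.notifier_call = widget_bus_notify,
};

static int __init widget_init(void)
{
	int ret = bus_register(&widget_bus);

	if (ret)
		return ret;
	ret = bus_register_notifier(&widget_bus, &widget_nb);
	if (ret)
		bus_unregister(&widget_bus);
	return ret;
}

static void __exit widget_exit(void)
{
	bus_unregister_notifier(&widget_bus, &widget_nb);
	bus_unregister(&widget_bus);
}

module_init(widget_init);
module_exit(widget_exit);
MODULE_LICENSE("GPL");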
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/mmdebug.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/vmalloc.h>

#include "physaddr.h"

#ifdef CONFIG_X86_64

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		VIRTUAL_BUG_ON((x > y) || !phys_addr_valid(x));
	}

	return x;
}
EXPORT_SYMBOL(__phys_addr);

unsigned long __phys_addr_symbol(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* only check upper bounds since lower bounds will trigger carry */
	VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);

	return y + phys_base;
}
EXPORT_SYMBOL(__phys_addr_symbol);
#endif

bool __virt_addr_valid(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !phys_addr_valid(x))
			return false;
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	unsigned long phys_addr = x - PAGE_OFFSET;

	/* VMALLOC_* aren't constants */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));

	/* max_low_pfn is set early, but not _that_ early */
	if (max_low_pfn) {
		VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
		BUG_ON(slow_virt_to_phys((void *)x) != phys_addr);
	}

	return phys_addr;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
		return false;
	if (x >= FIXADDR_START)
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif	/* CONFIG_X86_64 */
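The `x > y` tests above rely on unsigned wrap-around: subtracting __START_KERNEL_map underflows exactly when the original address was below that base, so comparing the difference against the original address recovers the carry flag without a second range check. A stand-alone, user-space sketch of the same trick follows; FAKE_KERNEL_MAP and the sample addresses are made-up stand-ins, not kernel values.

/* Demo of the carry-flag trick with assumed constants (not kernel code). */
#include <stdio.h>

#define FAKE_KERNEL_MAP 0xffffffff80000000UL	/* stand-in for __START_KERNEL_map */

static int above_base(unsigned long x)
{
	unsigned long y = x - FAKE_KERNEL_MAP;

	/*
	 * If x < FAKE_KERNEL_MAP the subtraction wraps and y becomes larger
	 * than x, so "x > y" is false exactly when the subtraction carried.
	 */
	return x > y;
}

int main(void)
{
	printf("%d\n", above_base(0xffffffff80001000UL));	/* 1: above the base */
	printf("%d\n", above_base(0xffff888000000000UL));	/* 0: below, wrapped  */
	return 0;
}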
// SPDX-License-Identifier: GPL-2.0
/*
 * USB Empeg empeg-car player driver
 *
 * Copyright (C) 2000, 2001
 *     Gary Brubaker (xavyer@ix.netcom.com)
 *
 * Copyright (C) 1999 - 2001
 *     Greg Kroah-Hartman (greg@kroah.com)
 *
 * See Documentation/usb/usb-serial.rst for more information on using this
 * driver
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Gary Brubaker <xavyer@ix.netcom.com>"
#define DRIVER_DESC "USB Empeg Mark I/II Driver"

#define EMPEG_VENDOR_ID		0x084f
#define EMPEG_PRODUCT_ID	0x0001

/* function prototypes for an empeg-car player */
static int  empeg_startup(struct usb_serial *serial);
static void empeg_init_termios(struct tty_struct *tty);

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
	{ }					/* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_serial_driver empeg_device = {
	.driver = {
		.name =		"empeg",
	},
	.id_table =		id_table,
	.num_ports =		1,
	.bulk_out_size =	256,
	.throttle =		usb_serial_generic_throttle,
	.unthrottle =		usb_serial_generic_unthrottle,
	.attach =		empeg_startup,
	.init_termios =		empeg_init_termios,
};

static struct usb_serial_driver * const serial_drivers[] = {
	&empeg_device, NULL
};

static int empeg_startup(struct usb_serial *serial)
{
	int r;

	if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
		dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
			serial->dev->actconfig->desc.bConfigurationValue);
		return -ENODEV;
	}

	r = usb_reset_configuration(serial->dev);

	/* continue on with initialization */
	return r;
}

static void empeg_init_termios(struct tty_struct *tty)
{
	struct ktermios *termios = &tty->termios;

	/*
	 * The empeg-car player wants these particular tty settings.
	 * You could, for example, change the baud rate, however the
	 * player only supports 115200 (currently), so there is really
	 * no point in support for changes to the tty settings.
	 * (at least for now)
	 *
	 * The default requirements for this device are:
	 */
	termios->c_iflag
		&= ~(IGNBRK	/* disable ignore break */
		| BRKINT	/* disable break causes interrupt */
		| PARMRK	/* disable mark parity errors */
		| ISTRIP	/* disable clear high bit of input characters */
		| INLCR		/* disable translate NL to CR */
		| IGNCR		/* disable ignore CR */
		| ICRNL		/* disable translate CR to NL */
		| IXON);	/* disable enable XON/XOFF flow control */

	termios->c_oflag
		&= ~OPOST;	/* disable postprocess output characters */

	termios->c_lflag
		&= ~(ECHO	/* disable echo input characters */
		| ECHONL	/* disable echo new line */
		| ICANON	/* disable erase, kill, werase, and rprnt special characters */
		| ISIG		/* disable interrupt, quit, and suspend special characters */
		| IEXTEN);	/* disable non-POSIX special characters */

	termios->c_cflag
		&= ~(CSIZE	/* no size */
		| PARENB	/* disable parity bit */
		| CBAUD);	/* clear current baud rate */

	termios->c_cflag
		|= CS8;		/* character size 8 bits */

	tty_encode_baud_rate(tty, 115200, 115200);
}

module_usb_serial_driver(serial_drivers, id_table);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
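The flag-by-flag setup in empeg_init_termios() amounts to a raw 8N1 line at 115200 baud. For comparison, here is a user-space sketch that applies the equivalent configuration with POSIX termios; the /dev/ttyUSB0 node name is an assumption for illustration only.

/* User-space sketch: put a serial node into the same raw 8N1/115200 mode. */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);	/* assumed node */

	if (fd < 0 || tcgetattr(fd, &tio) < 0) {
		perror("open/tcgetattr");
		return 1;
	}

	/* Clear the same iflag/oflag/lflag/cflag bits the driver clears. */
	tio.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP |
			 INLCR | IGNCR | ICRNL | IXON);
	tio.c_oflag &= ~OPOST;
	tio.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
	tio.c_cflag &= ~(CSIZE | PARENB);
	tio.c_cflag |= CS8 | CLOCAL | CREAD;
	cfsetispeed(&tio, B115200);
	cfsetospeed(&tio, B115200);

	if (tcsetattr(fd, TCSANOW, &tio) < 0) {
		perror("tcsetattr");
		return 1;
	}
	close(fd);
	return 0;
}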
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/read_write.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= filemap_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

static inline bool unsigned_offsets(struct file *file)
{
	return file->f_op->fop_flags & FOP_UNSIGNED_OFFSET;
}

/**
 * vfs_setpos_cookie - update the file offset for lseek and reset cookie
 * @file:	file structure in question
 * @offset:	file offset to seek to
 * @maxsize:	maximum file size
 * @cookie:	cookie to reset
 *
 * Update the file offset to the value specified by @offset if the given
 * offset is valid and it is not equal to the current file offset and
 * reset the specified cookie to indicate that a seek happened.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
				loff_t maxsize, u64 *cookie)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (cookie)
			*cookie = 0;
	}
	return offset;
}

/**
 * vfs_setpos - update the file offset for lseek
 * @file:	file structure in question
 * @offset:	file offset to seek to
 * @maxsize:	maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
*/ loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize) { return vfs_setpos_cookie(file, offset, maxsize, NULL); } EXPORT_SYMBOL(vfs_setpos); /** * must_set_pos - check whether f_pos has to be updated * @file: file to seek on * @offset: offset to use * @whence: type of seek operation * @eof: end of file * * Check whether f_pos needs to be updated and update @offset according * to @whence. * * Return: 0 if f_pos doesn't need to be updated, 1 if f_pos has to be * updated, and negative error code on failure. */ static int must_set_pos(struct file *file, loff_t *offset, int whence, loff_t eof) { switch (whence) { case SEEK_END: *offset += eof; break; case SEEK_CUR: /* * Here we special-case the lseek(fd, 0, SEEK_CUR) * position-querying operation. Avoid rewriting the "same" * f_pos value back to the file because a concurrent read(), * write() or lseek() might have altered it */ if (*offset == 0) { *offset = file->f_pos; return 0; } break; case SEEK_DATA: /* * In the generic case the entire file is data, so as long as * offset isn't at the end of the file then the offset is data. */ if ((unsigned long long)*offset >= eof) return -ENXIO; break; case SEEK_HOLE: /* * There is a virtual hole at the end of the file, so as long as * offset isn't i_size or larger, return i_size. */ if ((unsigned long long)*offset >= eof) return -ENXIO; *offset = eof; break; } return 1; } /** * generic_file_llseek_size - generic llseek implementation for regular files * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * @maxsize: max size of this file in file system * @eof: offset used for SEEK_END position * * This is a variant of generic_file_llseek that allows passing in a custom * maximum file size and a custom EOF position, for e.g. hashed directories * * Synchronization: * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms) * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes. * read/writes behave like SEEK_SET against seeks. */ loff_t generic_file_llseek_size(struct file *file, loff_t offset, int whence, loff_t maxsize, loff_t eof) { int ret; ret = must_set_pos(file, &offset, whence, eof); if (ret < 0) return ret; if (ret == 0) return offset; if (whence == SEEK_CUR) { /* * If the file requires locking via f_pos_lock we know * that mutual exclusion for SEEK_CUR on the same file * is guaranteed. If the file isn't locked, we take * f_lock to protect against f_pos races with other * SEEK_CURs. */ if (file_seek_cur_needs_f_lock(file)) { guard(spinlock)(&file->f_lock); return vfs_setpos(file, file->f_pos + offset, maxsize); } return vfs_setpos(file, file->f_pos + offset, maxsize); } return vfs_setpos(file, offset, maxsize); } EXPORT_SYMBOL(generic_file_llseek_size); /** * generic_llseek_cookie - versioned llseek implementation * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * @cookie: cookie to update * * See generic_file_llseek for a general description and locking assumptions. * * In contrast to generic_file_llseek, this function also resets a * specified cookie to indicate a seek took place. 
*/ loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence, u64 *cookie) { struct inode *inode = file->f_mapping->host; loff_t maxsize = inode->i_sb->s_maxbytes; loff_t eof = i_size_read(inode); int ret; if (WARN_ON_ONCE(!cookie)) return -EINVAL; /* * Require that this is only used for directories that guarantee * synchronization between readdir and seek so that an update to * @cookie is correctly synchronized with concurrent readdir. */ if (WARN_ON_ONCE(!(file->f_mode & FMODE_ATOMIC_POS))) return -EINVAL; ret = must_set_pos(file, &offset, whence, eof); if (ret < 0) return ret; if (ret == 0) return offset; /* No need to hold f_lock because we know that f_pos_lock is held. */ if (whence == SEEK_CUR) return vfs_setpos_cookie(file, file->f_pos + offset, maxsize, cookie); return vfs_setpos_cookie(file, offset, maxsize, cookie); } EXPORT_SYMBOL(generic_llseek_cookie); /** * generic_file_llseek - generic llseek implementation for regular files * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * * This is a generic implemenation of ->llseek useable for all normal local * filesystems. It just updates the file offset to the value specified by * @offset and @whence. */ loff_t generic_file_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; return generic_file_llseek_size(file, offset, whence, inode->i_sb->s_maxbytes, i_size_read(inode)); } EXPORT_SYMBOL(generic_file_llseek); /** * fixed_size_llseek - llseek implementation for fixed-sized devices * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * @size: size of the file * */ loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size) { switch (whence) { case SEEK_SET: case SEEK_CUR: case SEEK_END: return generic_file_llseek_size(file, offset, whence, size, size); default: return -EINVAL; } } EXPORT_SYMBOL(fixed_size_llseek); /** * no_seek_end_llseek - llseek implementation for fixed-sized devices * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * */ loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence) { switch (whence) { case SEEK_SET: case SEEK_CUR: return generic_file_llseek_size(file, offset, whence, OFFSET_MAX, 0); default: return -EINVAL; } } EXPORT_SYMBOL(no_seek_end_llseek); /** * no_seek_end_llseek_size - llseek implementation for fixed-sized devices * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * @size: maximal offset allowed * */ loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size) { switch (whence) { case SEEK_SET: case SEEK_CUR: return generic_file_llseek_size(file, offset, whence, size, 0); default: return -EINVAL; } } EXPORT_SYMBOL(no_seek_end_llseek_size); /** * noop_llseek - No Operation Performed llseek implementation * @file: file structure to seek on * @offset: file offset to seek to * @whence: type of seek * * This is an implementation of ->llseek useable for the rare special case when * userspace expects the seek to succeed but the (device) file is actually not * able to perform the seek. In this case you use noop_llseek() instead of * falling back to the default implementation of ->llseek. 
*/ loff_t noop_llseek(struct file *file, loff_t offset, int whence) { return file->f_pos; } EXPORT_SYMBOL(noop_llseek); loff_t default_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file_inode(file); loff_t retval; retval = inode_lock_killable(inode); if (retval) return retval; switch (whence) { case SEEK_END: offset += i_size_read(inode); break; case SEEK_CUR: if (offset == 0) { retval = file->f_pos; goto out; } offset += file->f_pos; break; case SEEK_DATA: /* * In the generic case the entire file is data, so as * long as offset isn't at the end of the file then the * offset is data. */ if (offset >= inode->i_size) { retval = -ENXIO; goto out; } break; case SEEK_HOLE: /* * There is a virtual hole at the end of the file, so * as long as offset isn't i_size or larger, return * i_size. */ if (offset >= inode->i_size) { retval = -ENXIO; goto out; } offset = inode->i_size; break; } retval = -EINVAL; if (offset >= 0 || unsigned_offsets(file)) { if (offset != file->f_pos) file->f_pos = offset; retval = offset; } out: inode_unlock(inode); return retval; } EXPORT_SYMBOL(default_llseek); loff_t vfs_llseek(struct file *file, loff_t offset, int whence) { if (!(file->f_mode & FMODE_LSEEK)) return -ESPIPE; return file->f_op->llseek(file, offset, whence); } EXPORT_SYMBOL(vfs_llseek); static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence) { off_t retval; CLASS(fd_pos, f)(fd); if (fd_empty(f)) return -EBADF; retval = -EINVAL; if (whence <= SEEK_MAX) { loff_t res = vfs_llseek(fd_file(f), offset, whence); retval = res; if (res != (loff_t)retval) retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ } return retval; } SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) { return ksys_lseek(fd, offset, whence); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence) { return ksys_lseek(fd, offset, whence); } #endif #if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \ defined(__ARCH_WANT_SYS_LLSEEK) SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high, unsigned long, offset_low, loff_t __user *, result, unsigned int, whence) { int retval; CLASS(fd_pos, f)(fd); loff_t offset; if (fd_empty(f)) return -EBADF; if (whence > SEEK_MAX) return -EINVAL; offset = vfs_llseek(fd_file(f), ((loff_t) offset_high << 32) | offset_low, whence); retval = (int)offset; if (offset >= 0) { retval = -EFAULT; if (!copy_to_user(result, &offset, sizeof(offset))) retval = 0; } return retval; } #endif int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count) { int mask = read_write == READ ? MAY_READ : MAY_WRITE; int ret; if (unlikely((ssize_t) count < 0)) return -EINVAL; if (ppos) { loff_t pos = *ppos; if (unlikely(pos < 0)) { if (!unsigned_offsets(file)) return -EINVAL; if (count >= -pos) /* both values are in 0..LLONG_MAX */ return -EOVERFLOW; } else if (unlikely((loff_t) (pos + count) < 0)) { if (!unsigned_offsets(file)) return -EINVAL; } } ret = security_file_permission(file, mask); if (ret) return ret; return fsnotify_file_area_perm(file, mask, ppos, count); } EXPORT_SYMBOL(rw_verify_area); static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) { struct kiocb kiocb; struct iov_iter iter; ssize_t ret; init_sync_kiocb(&kiocb, filp); kiocb.ki_pos = (ppos ? 
*ppos : 0); iov_iter_ubuf(&iter, ITER_DEST, buf, len); ret = filp->f_op->read_iter(&kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ppos) *ppos = kiocb.ki_pos; return ret; } static int warn_unsupported(struct file *file, const char *op) { pr_warn_ratelimited( "kernel %s not supported for file %pD4 (pid: %d comm: %.20s)\n", op, file, current->pid, current->comm); return -EINVAL; } ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) { struct kvec iov = { .iov_base = buf, .iov_len = min_t(size_t, count, MAX_RW_COUNT), }; struct kiocb kiocb; struct iov_iter iter; ssize_t ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ))) return -EINVAL; if (!(file->f_mode & FMODE_CAN_READ)) return -EINVAL; /* * Also fail if ->read_iter and ->read are both wired up as that * implies very convoluted semantics. */ if (unlikely(!file->f_op->read_iter || file->f_op->read)) return warn_unsupported(file, "read"); init_sync_kiocb(&kiocb, file); kiocb.ki_pos = pos ? *pos : 0; iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len); ret = file->f_op->read_iter(&kiocb, &iter); if (ret > 0) { if (pos) *pos = kiocb.ki_pos; fsnotify_access(file); add_rchar(current, ret); } inc_syscr(current); return ret; } ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) { ssize_t ret; ret = rw_verify_area(READ, file, pos, count); if (ret) return ret; return __kernel_read(file, buf, count, pos); } EXPORT_SYMBOL(kernel_read); ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { ssize_t ret; if (!(file->f_mode & FMODE_READ)) return -EBADF; if (!(file->f_mode & FMODE_CAN_READ)) return -EINVAL; if (unlikely(!access_ok(buf, count))) return -EFAULT; ret = rw_verify_area(READ, file, pos, count); if (ret) return ret; if (count > MAX_RW_COUNT) count = MAX_RW_COUNT; if (file->f_op->read) ret = file->f_op->read(file, buf, count, pos); else if (file->f_op->read_iter) ret = new_sync_read(file, buf, count, pos); else ret = -EINVAL; if (ret > 0) { fsnotify_access(file); add_rchar(current, ret); } inc_syscr(current); return ret; } static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos) { struct kiocb kiocb; struct iov_iter iter; ssize_t ret; init_sync_kiocb(&kiocb, filp); kiocb.ki_pos = (ppos ? *ppos : 0); iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len); ret = filp->f_op->write_iter(&kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0 && ppos) *ppos = kiocb.ki_pos; return ret; } /* caller is responsible for file_start_write/file_end_write */ ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos) { struct kiocb kiocb; ssize_t ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE))) return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; /* * Also fail if ->write_iter and ->write are both wired up as that * implies very convoluted semantics. */ if (unlikely(!file->f_op->write_iter || file->f_op->write)) return warn_unsupported(file, "write"); init_sync_kiocb(&kiocb, file); kiocb.ki_pos = pos ? 
*pos : 0; ret = file->f_op->write_iter(&kiocb, from); if (ret > 0) { if (pos) *pos = kiocb.ki_pos; fsnotify_modify(file); add_wchar(current, ret); } inc_syscw(current); return ret; } /* caller is responsible for file_start_write/file_end_write */ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) { struct kvec iov = { .iov_base = (void *)buf, .iov_len = min_t(size_t, count, MAX_RW_COUNT), }; struct iov_iter iter; iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len); return __kernel_write_iter(file, &iter, pos); } /* * This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()", * but autofs is one of the few internal kernel users that actually * wants this _and_ can be built as a module. So we need to export * this symbol for autofs, even though it really isn't appropriate * for any other kernel modules. */ EXPORT_SYMBOL_GPL(__kernel_write); ssize_t kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) { ssize_t ret; ret = rw_verify_area(WRITE, file, pos, count); if (ret) return ret; file_start_write(file); ret = __kernel_write(file, buf, count, pos); file_end_write(file); return ret; } EXPORT_SYMBOL(kernel_write); ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { ssize_t ret; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; if (unlikely(!access_ok(buf, count))) return -EFAULT; ret = rw_verify_area(WRITE, file, pos, count); if (ret) return ret; if (count > MAX_RW_COUNT) count = MAX_RW_COUNT; file_start_write(file); if (file->f_op->write) ret = file->f_op->write(file, buf, count, pos); else if (file->f_op->write_iter) ret = new_sync_write(file, buf, count, pos); else ret = -EINVAL; if (ret > 0) { fsnotify_modify(file); add_wchar(current, ret); } inc_syscw(current); file_end_write(file); return ret; } /* file_ppos returns &file->f_pos or NULL if file is stream */ static inline loff_t *file_ppos(struct file *file) { return file->f_mode & FMODE_STREAM ? 
NULL : &file->f_pos; } ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count) { CLASS(fd_pos, f)(fd); ssize_t ret = -EBADF; if (!fd_empty(f)) { loff_t pos, *ppos = file_ppos(fd_file(f)); if (ppos) { pos = *ppos; ppos = &pos; } ret = vfs_read(fd_file(f), buf, count, ppos); if (ret >= 0 && ppos) fd_file(f)->f_pos = pos; } return ret; } SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) { return ksys_read(fd, buf, count); } ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count) { CLASS(fd_pos, f)(fd); ssize_t ret = -EBADF; if (!fd_empty(f)) { loff_t pos, *ppos = file_ppos(fd_file(f)); if (ppos) { pos = *ppos; ppos = &pos; } ret = vfs_write(fd_file(f), buf, count, ppos); if (ret >= 0 && ppos) fd_file(f)->f_pos = pos; } return ret; } SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, size_t, count) { return ksys_write(fd, buf, count); } ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count, loff_t pos) { if (pos < 0) return -EINVAL; CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; if (fd_file(f)->f_mode & FMODE_PREAD) return vfs_read(fd_file(f), buf, count, &pos); return -ESPIPE; } SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf, size_t, count, loff_t, pos) { return ksys_pread64(fd, buf, count, pos); } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64) COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf, size_t, count, compat_arg_u64_dual(pos)) { return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos)); } #endif ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf, size_t count, loff_t pos) { if (pos < 0) return -EINVAL; CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; if (fd_file(f)->f_mode & FMODE_PWRITE) return vfs_write(fd_file(f), buf, count, &pos); return -ESPIPE; } SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf, size_t, count, loff_t, pos) { return ksys_pwrite64(fd, buf, count, pos); } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64) COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf, size_t, count, compat_arg_u64_dual(pos)) { return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos)); } #endif static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, loff_t *ppos, int type, rwf_t flags) { struct kiocb kiocb; ssize_t ret; init_sync_kiocb(&kiocb, filp); ret = kiocb_set_rw_flags(&kiocb, flags, type); if (ret) return ret; kiocb.ki_pos = (ppos ? 
*ppos : 0); if (type == READ) ret = filp->f_op->read_iter(&kiocb, iter); else ret = filp->f_op->write_iter(&kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ppos) *ppos = kiocb.ki_pos; return ret; } /* Do it by hand, with file-ops */ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, loff_t *ppos, int type, rwf_t flags) { ssize_t ret = 0; if (flags & ~RWF_HIPRI) return -EOPNOTSUPP; while (iov_iter_count(iter)) { ssize_t nr; if (type == READ) { nr = filp->f_op->read(filp, iter_iov_addr(iter), iter_iov_len(iter), ppos); } else { nr = filp->f_op->write(filp, iter_iov_addr(iter), iter_iov_len(iter), ppos); } if (nr < 0) { if (!ret) ret = nr; break; } ret += nr; if (nr != iter_iov_len(iter)) break; iov_iter_advance(iter, nr); } return ret; } ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, struct iov_iter *iter) { size_t tot_len; ssize_t ret = 0; if (!file->f_op->read_iter) return -EINVAL; if (!(file->f_mode & FMODE_READ)) return -EBADF; if (!(file->f_mode & FMODE_CAN_READ)) return -EINVAL; tot_len = iov_iter_count(iter); if (!tot_len) goto out; ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len); if (ret < 0) return ret; ret = file->f_op->read_iter(iocb, iter); out: if (ret >= 0) fsnotify_access(file); return ret; } EXPORT_SYMBOL(vfs_iocb_iter_read); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags) { size_t tot_len; ssize_t ret = 0; if (!file->f_op->read_iter) return -EINVAL; if (!(file->f_mode & FMODE_READ)) return -EBADF; if (!(file->f_mode & FMODE_CAN_READ)) return -EINVAL; tot_len = iov_iter_count(iter); if (!tot_len) goto out; ret = rw_verify_area(READ, file, ppos, tot_len); if (ret < 0) return ret; ret = do_iter_readv_writev(file, iter, ppos, READ, flags); out: if (ret >= 0) fsnotify_access(file); return ret; } EXPORT_SYMBOL(vfs_iter_read); /* * Caller is responsible for calling kiocb_end_write() on completion * if async iocb was queued. 
*/ ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, struct iov_iter *iter) { size_t tot_len; ssize_t ret = 0; if (!file->f_op->write_iter) return -EINVAL; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; tot_len = iov_iter_count(iter); if (!tot_len) return 0; ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len); if (ret < 0) return ret; kiocb_start_write(iocb); ret = file->f_op->write_iter(iocb, iter); if (ret != -EIOCBQUEUED) kiocb_end_write(iocb); if (ret > 0) fsnotify_modify(file); return ret; } EXPORT_SYMBOL(vfs_iocb_iter_write); ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags) { size_t tot_len; ssize_t ret; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; if (!file->f_op->write_iter) return -EINVAL; tot_len = iov_iter_count(iter); if (!tot_len) return 0; ret = rw_verify_area(WRITE, file, ppos, tot_len); if (ret < 0) return ret; file_start_write(file); ret = do_iter_readv_writev(file, iter, ppos, WRITE, flags); if (ret > 0) fsnotify_modify(file); file_end_write(file); return ret; } EXPORT_SYMBOL(vfs_iter_write); static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec, unsigned long vlen, loff_t *pos, rwf_t flags) { struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; size_t tot_len; ssize_t ret = 0; if (!(file->f_mode & FMODE_READ)) return -EBADF; if (!(file->f_mode & FMODE_CAN_READ)) return -EINVAL; ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); if (ret < 0) return ret; tot_len = iov_iter_count(&iter); if (!tot_len) goto out; ret = rw_verify_area(READ, file, pos, tot_len); if (ret < 0) goto out; if (file->f_op->read_iter) ret = do_iter_readv_writev(file, &iter, pos, READ, flags); else ret = do_loop_readv_writev(file, &iter, pos, READ, flags); out: if (ret >= 0) fsnotify_access(file); kfree(iov); return ret; } static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec, unsigned long vlen, loff_t *pos, rwf_t flags) { struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; size_t tot_len; ssize_t ret = 0; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); if (ret < 0) return ret; tot_len = iov_iter_count(&iter); if (!tot_len) goto out; ret = rw_verify_area(WRITE, file, pos, tot_len); if (ret < 0) goto out; file_start_write(file); if (file->f_op->write_iter) ret = do_iter_readv_writev(file, &iter, pos, WRITE, flags); else ret = do_loop_readv_writev(file, &iter, pos, WRITE, flags); if (ret > 0) fsnotify_modify(file); file_end_write(file); out: kfree(iov); return ret; } static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, rwf_t flags) { CLASS(fd_pos, f)(fd); ssize_t ret = -EBADF; if (!fd_empty(f)) { loff_t pos, *ppos = file_ppos(fd_file(f)); if (ppos) { pos = *ppos; ppos = &pos; } ret = vfs_readv(fd_file(f), vec, vlen, ppos, flags); if (ret >= 0 && ppos) fd_file(f)->f_pos = pos; } if (ret > 0) add_rchar(current, ret); inc_syscr(current); return ret; } static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, rwf_t flags) { CLASS(fd_pos, f)(fd); ssize_t ret = -EBADF; if (!fd_empty(f)) { loff_t pos, *ppos = file_ppos(fd_file(f)); if (ppos) { pos = *ppos; ppos = &pos; } ret = 
vfs_writev(fd_file(f), vec, vlen, ppos, flags); if (ret >= 0 && ppos) fd_file(f)->f_pos = pos; } if (ret > 0) add_wchar(current, ret); inc_syscw(current); return ret; } static inline loff_t pos_from_hilo(unsigned long high, unsigned long low) { #define HALF_LONG_BITS (BITS_PER_LONG / 2) return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low; } static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, loff_t pos, rwf_t flags) { ssize_t ret = -EBADF; if (pos < 0) return -EINVAL; CLASS(fd, f)(fd); if (!fd_empty(f)) { ret = -ESPIPE; if (fd_file(f)->f_mode & FMODE_PREAD) ret = vfs_readv(fd_file(f), vec, vlen, &pos, flags); } if (ret > 0) add_rchar(current, ret); inc_syscr(current); return ret; } static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, loff_t pos, rwf_t flags) { ssize_t ret = -EBADF; if (pos < 0) return -EINVAL; CLASS(fd, f)(fd); if (!fd_empty(f)) { ret = -ESPIPE; if (fd_file(f)->f_mode & FMODE_PWRITE) ret = vfs_writev(fd_file(f), vec, vlen, &pos, flags); } if (ret > 0) add_wchar(current, ret); inc_syscw(current); return ret; } SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen) { return do_readv(fd, vec, vlen, 0); } SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen) { return do_writev(fd, vec, vlen, 0); } SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h) { loff_t pos = pos_from_hilo(pos_h, pos_l); return do_preadv(fd, vec, vlen, pos, 0); } SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h, rwf_t, flags) { loff_t pos = pos_from_hilo(pos_h, pos_l); if (pos == -1) return do_readv(fd, vec, vlen, flags); return do_preadv(fd, vec, vlen, pos, flags); } SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h) { loff_t pos = pos_from_hilo(pos_h, pos_l); return do_pwritev(fd, vec, vlen, pos, 0); } SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h, rwf_t, flags) { loff_t pos = pos_from_hilo(pos_h, pos_l); if (pos == -1) return do_writev(fd, vec, vlen, flags); return do_pwritev(fd, vec, vlen, pos, flags); } /* * Various compat syscalls. Note that they all pretend to take a native * iovec - import_iovec will properly treat those as compat_iovecs based on * in_compat_syscall(). 
*/ #ifdef CONFIG_COMPAT #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64 COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, loff_t, pos) { return do_preadv(fd, vec, vlen, pos, 0); } #endif COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd, const struct iovec __user *, vec, compat_ulong_t, vlen, u32, pos_low, u32, pos_high) { loff_t pos = ((loff_t)pos_high << 32) | pos_low; return do_preadv(fd, vec, vlen, pos, 0); } #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2 COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, loff_t, pos, rwf_t, flags) { if (pos == -1) return do_readv(fd, vec, vlen, flags); return do_preadv(fd, vec, vlen, pos, flags); } #endif COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd, const struct iovec __user *, vec, compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags) { loff_t pos = ((loff_t)pos_high << 32) | pos_low; if (pos == -1) return do_readv(fd, vec, vlen, flags); return do_preadv(fd, vec, vlen, pos, flags); } #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64 COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, loff_t, pos) { return do_pwritev(fd, vec, vlen, pos, 0); } #endif COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd, const struct iovec __user *,vec, compat_ulong_t, vlen, u32, pos_low, u32, pos_high) { loff_t pos = ((loff_t)pos_high << 32) | pos_low; return do_pwritev(fd, vec, vlen, pos, 0); } #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2 COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen, loff_t, pos, rwf_t, flags) { if (pos == -1) return do_writev(fd, vec, vlen, flags); return do_pwritev(fd, vec, vlen, pos, flags); } #endif COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd, const struct iovec __user *,vec, compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags) { loff_t pos = ((loff_t)pos_high << 32) | pos_low; if (pos == -1) return do_writev(fd, vec, vlen, flags); return do_pwritev(fd, vec, vlen, pos, flags); } #endif /* CONFIG_COMPAT */ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count, loff_t max) { struct inode *in_inode, *out_inode; struct pipe_inode_info *opipe; loff_t pos; loff_t out_pos; ssize_t retval; int fl; /* * Get input file, and verify that it is ok.. */ CLASS(fd, in)(in_fd); if (fd_empty(in)) return -EBADF; if (!(fd_file(in)->f_mode & FMODE_READ)) return -EBADF; if (!ppos) { pos = fd_file(in)->f_pos; } else { pos = *ppos; if (!(fd_file(in)->f_mode & FMODE_PREAD)) return -ESPIPE; } retval = rw_verify_area(READ, fd_file(in), &pos, count); if (retval < 0) return retval; if (count > MAX_RW_COUNT) count = MAX_RW_COUNT; /* * Get output file, and verify that it is ok.. */ CLASS(fd, out)(out_fd); if (fd_empty(out)) return -EBADF; if (!(fd_file(out)->f_mode & FMODE_WRITE)) return -EBADF; in_inode = file_inode(fd_file(in)); out_inode = file_inode(fd_file(out)); out_pos = fd_file(out)->f_pos; if (!max) max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); if (unlikely(pos + count > max)) { if (pos >= max) return -EOVERFLOW; count = max - pos; } fl = 0; #if 0 /* * We need to debate whether we can enable this or not. The * man page documents EAGAIN return for the output at least, * and the application is arguably buggy if it doesn't expect * EAGAIN on a non-blocking file descriptor. 
*/ if (fd_file(in)->f_flags & O_NONBLOCK) fl = SPLICE_F_NONBLOCK; #endif opipe = get_pipe_info(fd_file(out), true); if (!opipe) { retval = rw_verify_area(WRITE, fd_file(out), &out_pos, count); if (retval < 0) return retval; retval = do_splice_direct(fd_file(in), &pos, fd_file(out), &out_pos, count, fl); } else { if (fd_file(out)->f_flags & O_NONBLOCK) fl |= SPLICE_F_NONBLOCK; retval = splice_file_to_pipe(fd_file(in), opipe, &pos, count, fl); } if (retval > 0) { add_rchar(current, retval); add_wchar(current, retval); fsnotify_access(fd_file(in)); fsnotify_modify(fd_file(out)); fd_file(out)->f_pos = out_pos; if (ppos) *ppos = pos; else fd_file(in)->f_pos = pos; } inc_syscr(current); inc_syscw(current); if (pos > max) retval = -EOVERFLOW; return retval; } SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count) { loff_t pos; off_t off; ssize_t ret; if (offset) { if (unlikely(get_user(off, offset))) return -EFAULT; pos = off; ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS); if (unlikely(put_user(pos, offset))) return -EFAULT; return ret; } return do_sendfile(out_fd, in_fd, NULL, count, 0); } SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count) { loff_t pos; ssize_t ret; if (offset) { if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t)))) return -EFAULT; ret = do_sendfile(out_fd, in_fd, &pos, count, 0); if (unlikely(put_user(pos, offset))) return -EFAULT; return ret; } return do_sendfile(out_fd, in_fd, NULL, count, 0); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, compat_off_t __user *, offset, compat_size_t, count) { loff_t pos; off_t off; ssize_t ret; if (offset) { if (unlikely(get_user(off, offset))) return -EFAULT; pos = off; ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS); if (unlikely(put_user(pos, offset))) return -EFAULT; return ret; } return do_sendfile(out_fd, in_fd, NULL, count, 0); } COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, compat_loff_t __user *, offset, compat_size_t, count) { loff_t pos; ssize_t ret; if (offset) { if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t)))) return -EFAULT; ret = do_sendfile(out_fd, in_fd, &pos, count, 0); if (unlikely(put_user(pos, offset))) return -EFAULT; return ret; } return do_sendfile(out_fd, in_fd, NULL, count, 0); } #endif /* * Performs necessary checks before doing a file copy * * Can adjust amount of bytes to copy via @req_count argument. * Returns appropriate error code that caller should return or * zero in case the copy should be allowed. */ static int generic_copy_file_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t *req_count, unsigned int flags) { struct inode *inode_in = file_inode(file_in); struct inode *inode_out = file_inode(file_out); uint64_t count = *req_count; loff_t size_in; int ret; ret = generic_file_rw_checks(file_in, file_out); if (ret) return ret; /* * We allow some filesystems to handle cross sb copy, but passing * a file of the wrong filesystem type to filesystem driver can result * in an attempt to dereference the wrong type of ->private_data, so * avoid doing that until we really have a good reason. * * nfs and cifs define several different file_system_type structures * and several different sets of file_operations, but they all end up * using the same ->copy_file_range() function pointer. 
*/ if (flags & COPY_FILE_SPLICE) { /* cross sb splice is allowed */ } else if (file_out->f_op->copy_file_range) { if (file_in->f_op->copy_file_range != file_out->f_op->copy_file_range) return -EXDEV; } else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) { return -EXDEV; } /* Don't touch certain kinds of inodes */ if (IS_IMMUTABLE(inode_out)) return -EPERM; if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out)) return -ETXTBSY; /* Ensure offsets don't wrap. */ if (pos_in + count < pos_in || pos_out + count < pos_out) return -EOVERFLOW; /* Shorten the copy to EOF */ size_in = i_size_read(inode_in); if (pos_in >= size_in) count = 0; else count = min(count, size_in - (uint64_t)pos_in); ret = generic_write_check_limits(file_out, pos_out, &count); if (ret) return ret; /* Don't allow overlapped copying within the same file. */ if (inode_in == inode_out && pos_out + count > pos_in && pos_out < pos_in + count) return -EINVAL; *req_count = count; return 0; } /* * copy_file_range() differs from regular file read and write in that it * specifically allows returning partial success. When it does so is up to * the copy_file_range method. */ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len, unsigned int flags) { ssize_t ret; bool splice = flags & COPY_FILE_SPLICE; bool samesb = file_inode(file_in)->i_sb == file_inode(file_out)->i_sb; if (flags & ~COPY_FILE_SPLICE) return -EINVAL; ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len, flags); if (unlikely(ret)) return ret; ret = rw_verify_area(READ, file_in, &pos_in, len); if (unlikely(ret)) return ret; ret = rw_verify_area(WRITE, file_out, &pos_out, len); if (unlikely(ret)) return ret; if (len == 0) return 0; file_start_write(file_out); /* * Cloning is supported by more file systems, so we implement copy on * same sb using clone, but for filesystems where both clone and copy * are supported (e.g. nfs,cifs), we only call the copy method. */ if (!splice && file_out->f_op->copy_file_range) { ret = file_out->f_op->copy_file_range(file_in, pos_in, file_out, pos_out, len, flags); } else if (!splice && file_in->f_op->remap_file_range && samesb) { ret = file_in->f_op->remap_file_range(file_in, pos_in, file_out, pos_out, min_t(loff_t, MAX_RW_COUNT, len), REMAP_FILE_CAN_SHORTEN); /* fallback to splice */ if (ret <= 0) splice = true; } else if (samesb) { /* Fallback to splice for same sb copy for backward compat */ splice = true; } file_end_write(file_out); if (!splice) goto done; /* * We can get here for same sb copy of filesystems that do not implement * ->copy_file_range() in case filesystem does not support clone or in * case filesystem supports clone but rejected the clone request (e.g. * because it was not block aligned). * * In both cases, fall back to kernel copy so we are able to maintain a * consistent story about which filesystems support copy_file_range() * and which filesystems do not, that will allow userspace tools to * make consistent decisions w.r.t. using copy_file_range(). * * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE * for server-side-copy between any two sb. * * In any case, we call do_splice_direct() and not splice_file_range(), * without file_start_write() held, to avoid possible deadlocks related * to splicing from input file, while file_start_write() is held on * the output file on a different sb.
*/ ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out, min_t(size_t, len, MAX_RW_COUNT), 0); done: if (ret > 0) { fsnotify_access(file_in); add_rchar(current, ret); fsnotify_modify(file_out); add_wchar(current, ret); } inc_syscr(current); inc_syscw(current); return ret; } EXPORT_SYMBOL(vfs_copy_file_range); SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags) { loff_t pos_in; loff_t pos_out; ssize_t ret = -EBADF; CLASS(fd, f_in)(fd_in); if (fd_empty(f_in)) return -EBADF; CLASS(fd, f_out)(fd_out); if (fd_empty(f_out)) return -EBADF; if (off_in) { if (copy_from_user(&pos_in, off_in, sizeof(loff_t))) return -EFAULT; } else { pos_in = fd_file(f_in)->f_pos; } if (off_out) { if (copy_from_user(&pos_out, off_out, sizeof(loff_t))) return -EFAULT; } else { pos_out = fd_file(f_out)->f_pos; } if (flags != 0) return -EINVAL; ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len, flags); if (ret > 0) { pos_in += ret; pos_out += ret; if (off_in) { if (copy_to_user(off_in, &pos_in, sizeof(loff_t))) ret = -EFAULT; } else { fd_file(f_in)->f_pos = pos_in; } if (off_out) { if (copy_to_user(off_out, &pos_out, sizeof(loff_t))) ret = -EFAULT; } else { fd_file(f_out)->f_pos = pos_out; } } return ret; } /* * Don't operate on ranges the page cache doesn't support, and don't exceed the * LFS limits. If pos is under the limit it becomes a short access. If it * exceeds the limit we return -EFBIG. */ int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count) { struct inode *inode = file->f_mapping->host; loff_t max_size = inode->i_sb->s_maxbytes; loff_t limit = rlimit(RLIMIT_FSIZE); if (limit != RLIM_INFINITY) { if (pos >= limit) { send_sig(SIGXFSZ, current, 0); return -EFBIG; } *count = min(*count, limit - pos); } if (!(file->f_flags & O_LARGEFILE)) max_size = MAX_NON_LFS; if (unlikely(pos >= max_size)) return -EFBIG; *count = min(*count, max_size - pos); return 0; } EXPORT_SYMBOL_GPL(generic_write_check_limits); /* Like generic_write_checks(), but takes size of write instead of iter. */ int generic_write_checks_count(struct kiocb *iocb, loff_t *count) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; if (IS_SWAPFILE(inode)) return -ETXTBSY; if (!*count) return 0; if (iocb->ki_flags & IOCB_APPEND) iocb->ki_pos = i_size_read(inode); if ((iocb->ki_flags & IOCB_NOWAIT) && !((iocb->ki_flags & IOCB_DIRECT) || (file->f_op->fop_flags & FOP_BUFFER_WASYNC))) return -EINVAL; return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count); } EXPORT_SYMBOL(generic_write_checks_count); /* * Performs necessary checks before doing a write * * Can adjust writing position or amount of bytes to write. * Returns appropriate error code that caller should return or * zero in case that write should be allowed. */ ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) { loff_t count = iov_iter_count(from); int ret; ret = generic_write_checks_count(iocb, &count); if (ret) return ret; iov_iter_truncate(from, count); return iov_iter_count(from); } EXPORT_SYMBOL(generic_write_checks); /* * Performs common checks before doing a file copy/clone * from @file_in to @file_out. */ int generic_file_rw_checks(struct file *file_in, struct file *file_out) { struct inode *inode_in = file_inode(file_in); struct inode *inode_out = file_inode(file_out); /* Don't copy dirs, pipes, sockets... 
*/ if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) return -EISDIR; if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) return -EINVAL; if (!(file_in->f_mode & FMODE_READ) || !(file_out->f_mode & FMODE_WRITE) || (file_out->f_flags & O_APPEND)) return -EBADF; return 0; } int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter) { size_t len = iov_iter_count(iter); if (!iter_is_ubuf(iter)) return -EINVAL; if (!is_power_of_2(len)) return -EINVAL; if (!IS_ALIGNED(iocb->ki_pos, len)) return -EINVAL; if (!(iocb->ki_flags & IOCB_DIRECT)) return -EOPNOTSUPP; return 0; } EXPORT_SYMBOL_GPL(generic_atomic_write_valid); |
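/*
 * A minimal userspace sketch (not part of this file) of how the semantics
 * implemented above look from the caller's side: copy_file_range(2) may copy
 * fewer bytes than requested, so the call is retried, and a cross-filesystem
 * copy may fail with EXDEV, in which case a caller would fall back to plain
 * read()/write(). Assumes the glibc copy_file_range() wrapper; the file paths
 * and the helper name are hypothetical.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int copy_whole_file(const char *src, const char *dst)
{
	struct stat st;
	off_t left;
	ssize_t n;
	int in, out, err = -1;

	in = open(src, O_RDONLY);
	out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || fstat(in, &st) < 0)
		goto out;

	/* Loop: partial success is allowed, just as vfs_copy_file_range() documents. */
	for (left = st.st_size; left > 0; left -= n) {
		n = copy_file_range(in, NULL, out, NULL, left, 0);
		if (n < 0 && errno == EXDEV) {
			/* cross-sb copy refused: fall back to read()/write() here */
			goto out;
		}
		if (n <= 0)
			goto out;
	}
	err = 0;
out:
	if (in >= 0)
		close(in);
	if (out >= 0)
		close(out);
	return err;
}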
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for NXP PN533 NFC Chip - USB transport layer * * Copyright (C) 2011 Instituto Nokia de Tecnologia * Copyright (C) 2012-2013 Tieto Poland */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/nfc.h> #include <linux/netdevice.h> #include <net/nfc/nfc.h> #include "pn533.h" #define VERSION "0.1" #define PN533_VENDOR_ID 0x4CC #define PN533_PRODUCT_ID 0x2533 #define SCM_VENDOR_ID 0x4E6 #define SCL3711_PRODUCT_ID 0x5591 #define SONY_VENDOR_ID 0x054c #define PASORI_PRODUCT_ID 0x02e1 #define ACS_VENDOR_ID 0x072f #define ACR122U_PRODUCT_ID 0x2200 static const struct usb_device_id pn533_usb_table[] = { { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID), .driver_info = PN533_DEVICE_STD }, { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID), .driver_info = PN533_DEVICE_STD }, { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID), .driver_info =
PN533_DEVICE_PASORI }, { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID), .driver_info = PN533_DEVICE_ACR122U }, { } }; MODULE_DEVICE_TABLE(usb, pn533_usb_table); struct pn533_usb_phy { struct usb_device *udev; struct usb_interface *interface; struct urb *out_urb; struct urb *in_urb; struct urb *ack_urb; u8 *ack_buffer; struct pn533 *priv; }; static void pn533_recv_response(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; struct sk_buff *skb = NULL; if (!urb->status) { skb = alloc_skb(urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&phy->udev->dev, "failed to alloc memory\n"); } else { skb_put_data(skb, urb->transfer_buffer, urb->actual_length); } } pn533_recv_frame(phy->priv, skb, urb->status); } static int pn533_submit_urb_for_response(struct pn533_usb_phy *phy, gfp_t flags) { phy->in_urb->complete = pn533_recv_response; return usb_submit_urb(phy->in_urb, flags); } static void pn533_recv_ack(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; struct pn533 *priv = phy->priv; struct pn533_cmd *cmd = priv->cmd; struct pn533_std_frame *in_frame; int rc; cmd->status = urb->status; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: dev_dbg(&phy->udev->dev, "The urb has been stopped (status %d)\n", urb->status); goto sched_wq; case -ESHUTDOWN: default: nfc_err(&phy->udev->dev, "Urb failure (status %d)\n", urb->status); goto sched_wq; } in_frame = phy->in_urb->transfer_buffer; if (!pn533_rx_frame_is_ack(in_frame)) { nfc_err(&phy->udev->dev, "Received an invalid ack\n"); cmd->status = -EIO; goto sched_wq; } rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); if (rc) { nfc_err(&phy->udev->dev, "usb_submit_urb failed with result %d\n", rc); cmd->status = rc; goto sched_wq; } return; sched_wq: queue_work(priv->wq, &priv->cmd_complete_work); } static int pn533_submit_urb_for_ack(struct pn533_usb_phy *phy, gfp_t flags) { phy->in_urb->complete = pn533_recv_ack; return usb_submit_urb(phy->in_urb, flags); } static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) { struct pn533_usb_phy *phy = dev->phy; static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ if (!phy->ack_buffer) { phy->ack_buffer = kmemdup(ack, sizeof(ack), flags); if (!phy->ack_buffer) return -ENOMEM; } phy->ack_urb->transfer_buffer = phy->ack_buffer; phy->ack_urb->transfer_buffer_length = sizeof(ack); return usb_submit_urb(phy->ack_urb, flags); } struct pn533_out_arg { struct pn533_usb_phy *phy; struct completion done; }; static int pn533_usb_send_frame(struct pn533 *dev, struct sk_buff *out) { struct pn533_usb_phy *phy = dev->phy; struct pn533_out_arg arg; void *cntx; int rc; if (phy->priv == NULL) phy->priv = dev; phy->out_urb->transfer_buffer = out->data; phy->out_urb->transfer_buffer_length = out->len; print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); arg.phy = phy; init_completion(&arg.done); cntx = phy->out_urb->context; phy->out_urb->context = &arg; rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); if (rc) return rc; wait_for_completion(&arg.done); phy->out_urb->context = cntx; if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); if (rc) goto error; } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { /* request for ACK if that's the case */ rc = pn533_submit_urb_for_ack(phy, GFP_KERNEL); if (rc) goto error; } return 0; error: 
usb_unlink_urb(phy->out_urb); return rc; } static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags) { struct pn533_usb_phy *phy = dev->phy; /* ACR122U does not support any command which aborts last * issued command i.e. as ACK for standard PN533. Additionally, * it behaves strangely, sending broken or incorrect responses, * when we cancel urb before the chip will send response. */ if (dev->device_type == PN533_DEVICE_ACR122U) return; /* An ack will cancel the last issued command */ pn533_usb_send_ack(dev, flags); /* cancel the urb request */ usb_kill_urb(phy->in_urb); } /* ACR122 specific structs and functions */ /* ACS ACR122 pn533 frame definitions */ #define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \ + 2) #define PN533_ACR122_TX_FRAME_TAIL_LEN 0 #define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \ + 2) #define PN533_ACR122_RX_FRAME_TAIL_LEN 2 #define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN /* CCID message types */ #define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62 #define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B #define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83 struct pn533_acr122_ccid_hdr { u8 type; u32 datalen; u8 slot; u8 seq; /* * 3 msg specific bytes or status, error and 1 specific * byte for response msg */ u8 params[3]; } __packed; struct pn533_acr122_apdu_hdr { u8 class; u8 ins; u8 p1; u8 p2; } __packed; struct pn533_acr122_tx_frame { struct pn533_acr122_ccid_hdr ccid; struct pn533_acr122_apdu_hdr apdu; u8 datalen; u8 data[]; /* pn533 frame: TFI ... */ } __packed; struct pn533_acr122_rx_frame { struct pn533_acr122_ccid_hdr ccid; u8 data[]; /* pn533 frame : TFI ... */ } __packed; static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code) { struct pn533_acr122_tx_frame *frame = _frame; frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE; /* sizeof(apdu_hdr) + sizeof(datalen) */ frame->ccid.datalen = sizeof(frame->apdu) + 1; frame->ccid.slot = 0; frame->ccid.seq = 0; frame->ccid.params[0] = 0; frame->ccid.params[1] = 0; frame->ccid.params[2] = 0; frame->data[0] = PN533_STD_FRAME_DIR_OUT; frame->data[1] = cmd_code; frame->datalen = 2; /* data[0] + data[1] */ frame->apdu.class = 0xFF; frame->apdu.ins = 0; frame->apdu.p1 = 0; frame->apdu.p2 = 0; } static void pn533_acr122_tx_frame_finish(void *_frame) { struct pn533_acr122_tx_frame *frame = _frame; frame->ccid.datalen += frame->datalen; } static void pn533_acr122_tx_update_payload_len(void *_frame, int len) { struct pn533_acr122_tx_frame *frame = _frame; frame->datalen += len; } static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev) { struct pn533_acr122_rx_frame *frame = _frame; if (frame->ccid.type != 0x83) return false; if (!frame->ccid.datalen) return false; if (frame->data[frame->ccid.datalen - 2] == 0x63) return false; return true; } static int pn533_acr122_rx_frame_size(void *frame) { struct pn533_acr122_rx_frame *f = frame; /* f->ccid.datalen already includes tail length */ return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen; } static u8 pn533_acr122_get_cmd_code(void *frame) { struct pn533_acr122_rx_frame *f = frame; return PN533_FRAME_CMD(f); } static struct pn533_frame_ops pn533_acr122_frame_ops = { .tx_frame_init = pn533_acr122_tx_frame_init, .tx_frame_finish = pn533_acr122_tx_frame_finish, .tx_update_payload_len = pn533_acr122_tx_update_payload_len, .tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN, .tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN, .rx_is_frame_valid = pn533_acr122_is_rx_frame_valid, .rx_header_len =
PN533_ACR122_RX_FRAME_HEADER_LEN, .rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN, .rx_frame_size = pn533_acr122_rx_frame_size, .max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN, .get_cmd_code = pn533_acr122_get_cmd_code, }; struct pn533_acr122_poweron_rdr_arg { int rc; struct completion done; }; static void pn533_acr122_poweron_rdr_resp(struct urb *urb) { struct pn533_acr122_poweron_rdr_arg *arg = urb->context; print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, urb->transfer_buffer, urb->transfer_buffer_length, false); arg->rc = urb->status; complete(&arg->done); } static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) { /* Power on the reader (CCID cmd) */ u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON, 0, 0, 0, 0, 0, 0, 3, 0, 0}; char *buffer; int transferred; int rc; void *cntx; struct pn533_acr122_poweron_rdr_arg arg; buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL); if (!buffer) return -ENOMEM; init_completion(&arg.done); cntx = phy->in_urb->context; /* backup context */ phy->in_urb->complete = pn533_acr122_poweron_rdr_resp; phy->in_urb->context = &arg; print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, cmd, sizeof(cmd), false); rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), &transferred, 5000); kfree(buffer); if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, "Reader power on cmd error %d\n", rc); return rc; } rc = usb_submit_urb(phy->in_urb, GFP_KERNEL); if (rc) { nfc_err(&phy->udev->dev, "Can't submit reader poweron cmd response %d\n", rc); return rc; } wait_for_completion(&arg.done); phy->in_urb->context = cntx; /* restore context */ return arg.rc; } static void pn533_out_complete(struct urb *urb) { struct pn533_out_arg *arg = urb->context; struct pn533_usb_phy *phy = arg->phy; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: dev_dbg(&phy->udev->dev, "The urb has been stopped (status %d)\n", urb->status); break; case -ESHUTDOWN: default: nfc_err(&phy->udev->dev, "Urb failure (status %d)\n", urb->status); } complete(&arg->done); } static void pn533_ack_complete(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: dev_dbg(&phy->udev->dev, "The urb has been stopped (status %d)\n", urb->status); break; case -ESHUTDOWN: default: nfc_err(&phy->udev->dev, "Urb failure (status %d)\n", urb->status); } } static const struct pn533_phy_ops usb_phy_ops = { .send_frame = pn533_usb_send_frame, .send_ack = pn533_usb_send_ack, .abort_cmd = pn533_usb_abort_cmd, }; static int pn533_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct pn533 *priv; struct pn533_usb_phy *phy; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int in_endpoint = 0; int out_endpoint = 0; int rc = -ENOMEM; int i; u32 protocols; enum pn533_protocol_type protocol_type = PN533_PROTO_REQ_ACK_RESP; struct pn533_frame_ops *fops = NULL; unsigned char *in_buf; int in_buf_len = PN533_EXT_FRAME_HEADER_LEN + PN533_STD_FRAME_MAX_PAYLOAD_LEN + PN533_STD_FRAME_TAIL_LEN; phy = devm_kzalloc(&interface->dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; in_buf = kzalloc(in_buf_len, GFP_KERNEL); if (!in_buf) return -ENOMEM; phy->udev = usb_get_dev(interface_to_usbdev(interface)); phy->interface = interface; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!in_endpoint &&
usb_endpoint_is_bulk_in(endpoint)) in_endpoint = endpoint->bEndpointAddress; if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) out_endpoint = endpoint->bEndpointAddress; } if (!in_endpoint || !out_endpoint) { nfc_err(&interface->dev, "Could not find bulk-in or bulk-out endpoint\n"); rc = -ENODEV; goto error; } phy->in_urb = usb_alloc_urb(0, GFP_KERNEL); phy->out_urb = usb_alloc_urb(0, GFP_KERNEL); phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL); if (!phy->in_urb || !phy->out_urb || !phy->ack_urb) goto error; usb_fill_bulk_urb(phy->in_urb, phy->udev, usb_rcvbulkpipe(phy->udev, in_endpoint), in_buf, in_buf_len, NULL, phy); usb_fill_bulk_urb(phy->out_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), NULL, 0, pn533_out_complete, phy); usb_fill_bulk_urb(phy->ack_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), NULL, 0, pn533_ack_complete, phy); switch (id->driver_info) { case PN533_DEVICE_STD: protocols = PN533_ALL_PROTOCOLS; break; case PN533_DEVICE_PASORI: protocols = PN533_NO_TYPE_B_PROTOCOLS; break; case PN533_DEVICE_ACR122U: protocols = PN533_NO_TYPE_B_PROTOCOLS; fops = &pn533_acr122_frame_ops; protocol_type = PN533_PROTO_REQ_RESP; rc = pn533_acr122_poweron_rdr(phy); if (rc < 0) { nfc_err(&interface->dev, "Couldn't poweron the reader (error %d)\n", rc); goto error; } break; default: nfc_err(&interface->dev, "Unknown device type %lu\n", id->driver_info); rc = -EINVAL; goto error; } priv = pn53x_common_init(id->driver_info, protocol_type, phy, &usb_phy_ops, fops, &phy->udev->dev); if (IS_ERR(priv)) { rc = PTR_ERR(priv); goto error; } phy->priv = priv; rc = pn533_finalize_setup(priv); if (rc) goto err_clean; usb_set_intfdata(interface, phy); rc = pn53x_register_nfc(priv, protocols, &interface->dev); if (rc) goto err_clean; return 0; err_clean: pn53x_common_clean(priv); error: usb_kill_urb(phy->in_urb); usb_kill_urb(phy->out_urb); usb_kill_urb(phy->ack_urb); usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); usb_free_urb(phy->ack_urb); usb_put_dev(phy->udev); kfree(in_buf); kfree(phy->ack_buffer); return rc; } static void pn533_usb_disconnect(struct usb_interface *interface) { struct pn533_usb_phy *phy = usb_get_intfdata(interface); if (!phy) return; pn53x_unregister_nfc(phy->priv); pn53x_common_clean(phy->priv); usb_set_intfdata(interface, NULL); usb_kill_urb(phy->in_urb); usb_kill_urb(phy->out_urb); usb_kill_urb(phy->ack_urb); kfree(phy->in_urb->transfer_buffer); usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); usb_free_urb(phy->ack_urb); kfree(phy->ack_buffer); nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); } static struct usb_driver pn533_usb_driver = { .name = "pn533_usb", .probe = pn533_usb_probe, .disconnect = pn533_usb_disconnect, .id_table = pn533_usb_table, }; module_usb_driver(pn533_usb_driver); MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>"); MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>"); MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>"); MODULE_DESCRIPTION("PN533 USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); |
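/*
 * A standalone sketch (not part of the driver above) of how the 6-byte PN533
 * ACK frame sent by pn533_usb_send_ack() can be recognised on the receive
 * side: preamble (00), start-of-packet code (00 ff), ACK code (00 ff),
 * postamble (00). The real check is done by pn533_rx_frame_is_ack() in the
 * pn533 core; this simplified helper only illustrates the byte layout and its
 * name is hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool frame_is_pn533_ack(const uint8_t *buf, size_t len)
{
	/* Preamble, SoPC (2), ACK code (2), Postamble */
	static const uint8_t ack[6] = { 0x00, 0x00, 0xff, 0x00, 0xff, 0x00 };

	return len >= sizeof(ack) && memcmp(buf, ack, sizeof(ack)) == 0;
}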
/* SPDX-License-Identifier: GPL-2.0 */ /* * Type definitions for the multi-level security (MLS) policy. * * Author : Stephen Smalley, <stephen.smalley.work@gmail.com> */ /* * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> * Support for enhanced MLS infrastructure. * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. */ #ifndef _SS_MLS_TYPES_H_ #define _SS_MLS_TYPES_H_ #include "security.h" #include "ebitmap.h" struct mls_level { u32 sens; /* sensitivity */ struct ebitmap cat; /* category set */ }; struct mls_range { struct mls_level level[2]; /* low == level[0], high == level[1] */ }; static inline int mls_level_eq(const struct mls_level *l1, const struct mls_level *l2) { return ((l1->sens == l2->sens) && ebitmap_equal(&l1->cat, &l2->cat)); } static inline int mls_level_dom(const struct mls_level *l1, const struct mls_level *l2) { return ((l1->sens >= l2->sens) && ebitmap_contains(&l1->cat, &l2->cat, 0)); } #define mls_level_incomp(l1, l2) \ (!mls_level_dom((l1), (l2)) && !mls_level_dom((l2), (l1))) #define mls_level_between(l1, l2, l3) \ (mls_level_dom((l1), (l2)) && mls_level_dom((l3), (l1))) #define mls_range_contains(r1, r2) \ (mls_level_dom(&(r2).level[0], &(r1).level[0]) && \ mls_level_dom(&(r1).level[1], &(r2).level[1])) #endif /* _SS_MLS_TYPES_H_ */ |
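/*
 * A small userspace sketch (not SELinux code) of the MLS lattice relations
 * defined above, with the ebitmap category set simplified to a 64-bit mask.
 * level_dom() mirrors mls_level_dom(): l1 dominates l2 when its sensitivity is
 * at least as high and its category set is a superset. The struct and helper
 * names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_level {
	uint32_t sens;	/* sensitivity */
	uint64_t cat;	/* category set, one bit per category */
};

static bool level_dom(const struct demo_level *l1, const struct demo_level *l2)
{
	/* superset test: every category bit of l2 must also be set in l1 */
	return l1->sens >= l2->sens && (l2->cat & ~l1->cat) == 0;
}

static bool level_incomp(const struct demo_level *l1, const struct demo_level *l2)
{
	/* incomparable: neither level dominates the other */
	return !level_dom(l1, l2) && !level_dom(l2, l1);
}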
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB compliant linux driver for mobile DVB-T USB devices based on * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-C/P) * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * based on GPL code from DiBcom, which has * Copyright (C) 2004 Amaury Demol for DiBcom * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* USB Driver stuff */ static struct dvb_usb_device_properties dibusb_mc_properties; static int dibusb_mc_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &dibusb_mc_properties, THIS_MODULE, NULL, adapter_nr); } /* do not change the order of the ID table */ enum { DIBCOM_MOD3001_COLD, DIBCOM_MOD3001_WARM, ULTIMA_TVBOX_USB2_COLD, ULTIMA_TVBOX_USB2_WARM, LITEON_DVB_T_COLD, LITEON_DVB_T_WARM, EMPIA_DIGIVOX_MINI_SL_COLD, EMPIA_DIGIVOX_MINI_SL_WARM, GRANDTEC_DVBT_USB2_COLD, GRANDTEC_DVBT_USB2_WARM, ULTIMA_ARTEC_T14_COLD, ULTIMA_ARTEC_T14_WARM, LEADTEK_WINFAST_DTV_DONGLE_COLD, LEADTEK_WINFAST_DTV_DONGLE_WARM, HUMAX_DVB_T_STICK_HIGH_SPEED_COLD, HUMAX_DVB_T_STICK_HIGH_SPEED_WARM, }; static const struct usb_device_id dibusb_dib3000mc_table[] = { DVB_USB_DEV(DIBCOM, DIBCOM_MOD3001_COLD), DVB_USB_DEV(DIBCOM, DIBCOM_MOD3001_WARM), DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_TVBOX_USB2_COLD), DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_TVBOX_USB2_WARM), DVB_USB_DEV(LITEON, LITEON_DVB_T_COLD), DVB_USB_DEV(LITEON, LITEON_DVB_T_WARM), DVB_USB_DEV(EMPIA, EMPIA_DIGIVOX_MINI_SL_COLD), DVB_USB_DEV(EMPIA, EMPIA_DIGIVOX_MINI_SL_WARM), DVB_USB_DEV(GRANDTEC, GRANDTEC_DVBT_USB2_COLD), DVB_USB_DEV(GRANDTEC, GRANDTEC_DVBT_USB2_WARM), DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_ARTEC_T14_COLD), DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_ARTEC_T14_WARM), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_COLD), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_WARM), DVB_USB_DEV(HUMAX_COEX, HUMAX_DVB_T_STICK_HIGH_SPEED_COLD), DVB_USB_DEV(HUMAX_COEX, HUMAX_DVB_T_STICK_HIGH_SPEED_WARM), { } }; MODULE_DEVICE_TABLE (usb, dibusb_dib3000mc_table); static struct dvb_usb_device_properties dibusb_mc_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-dibusb-6.0.0.8.fw", .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .streaming_ctrl = dibusb2_0_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mc_frontend_attach, .tuner_attach = dibusb_dib3000mc_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } }, }}, .size_of_priv = sizeof(struct dibusb_state), } }, .power_ctrl = dibusb2_0_power_ctrl, .rc.legacy = { .rc_interval =
DEFAULT_RC_INTERVAL, .rc_map_table = rc_map_dibusb_table, .rc_map_size = 111, /* FIXME */ .rc_query = dibusb_rc_query, }, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 8, .devices = { { "DiBcom USB2.0 DVB-T reference design (MOD3000P)", { &dibusb_dib3000mc_table[DIBCOM_MOD3001_COLD], NULL }, { &dibusb_dib3000mc_table[DIBCOM_MOD3001_WARM], NULL }, }, { "Artec T1 USB2.0 TVBOX (please check the warm ID)", { &dibusb_dib3000mc_table[ULTIMA_TVBOX_USB2_COLD], NULL }, { &dibusb_dib3000mc_table[ULTIMA_TVBOX_USB2_WARM], NULL }, }, { "LITE-ON USB2.0 DVB-T Tuner", /* Also rebranded as Intuix S800, Toshiba */ { &dibusb_dib3000mc_table[LITEON_DVB_T_COLD], NULL }, { &dibusb_dib3000mc_table[LITEON_DVB_T_WARM], NULL }, }, { "MSI Digivox Mini SL", { &dibusb_dib3000mc_table[EMPIA_DIGIVOX_MINI_SL_COLD], NULL }, { &dibusb_dib3000mc_table[EMPIA_DIGIVOX_MINI_SL_WARM], NULL }, }, { "GRAND - USB2.0 DVB-T adapter", { &dibusb_dib3000mc_table[GRANDTEC_DVBT_USB2_COLD], NULL }, { &dibusb_dib3000mc_table[GRANDTEC_DVBT_USB2_WARM], NULL }, }, { "Artec T14 - USB2.0 DVB-T", { &dibusb_dib3000mc_table[ULTIMA_ARTEC_T14_COLD], NULL }, { &dibusb_dib3000mc_table[ULTIMA_ARTEC_T14_WARM], NULL }, }, { "Leadtek - USB2.0 Winfast DTV dongle", { &dibusb_dib3000mc_table[LEADTEK_WINFAST_DTV_DONGLE_COLD], NULL }, { &dibusb_dib3000mc_table[LEADTEK_WINFAST_DTV_DONGLE_WARM], NULL }, }, { "Humax/Coex DVB-T USB Stick 2.0 High Speed", { &dibusb_dib3000mc_table[HUMAX_DVB_T_STICK_HIGH_SPEED_COLD], NULL }, { &dibusb_dib3000mc_table[HUMAX_DVB_T_STICK_HIGH_SPEED_WARM], NULL }, }, { NULL }, } }; static struct usb_driver dibusb_mc_driver = { .name = "dvb_usb_dibusb_mc", .probe = dibusb_mc_probe, .disconnect = dvb_usb_device_exit, .id_table = dibusb_dib3000mc_table, }; module_usb_driver(dibusb_mc_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); |
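/*
 * An illustrative sketch (not buildable against the real dvb-usb-ids.h) of how
 * a new dongle would be added to the tables above while respecting the
 * "do not change the order of the ID table" rule: the cold (before the
 * firmware named in .firmware is uploaded) and warm (after re-enumeration)
 * USB IDs are appended as a pair, and the .devices[] entry refers to them by
 * their enum index. Vendor and product names below are made up.
 */
#if 0	/* example only */
enum {
	/* ... existing entries keep their positions ... */
	EXAMPLE_DVBT_DONGLE_COLD,	/* appended, never inserted in the middle */
	EXAMPLE_DVBT_DONGLE_WARM,
};

static const struct usb_device_id example_table[] = {
	/* ... existing entries ... */
	DVB_USB_DEV(EXAMPLE_VENDOR, EXAMPLE_DVBT_DONGLE_COLD),
	DVB_USB_DEV(EXAMPLE_VENDOR, EXAMPLE_DVBT_DONGLE_WARM),
	{ }
};

/* and in the .devices[] array of the properties struct: */
/*	{ "Example vendor DVB-T dongle",
 *	  { &example_table[EXAMPLE_DVBT_DONGLE_COLD], NULL },
 *	  { &example_table[EXAMPLE_DVBT_DONGLE_WARM], NULL },
 *	},
 */
#endif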
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include "pvrusb2-hdw.h" #include "pvrusb2-devattr.h" #include "pvrusb2-context.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #include "pvrusb2-sysfs.h" #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" #define DRIVER_VERSION "V4L in-tree version" #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ PVR2_TRACE_INFO| \ PVR2_TRACE_STD| \ PVR2_TRACE_TOLERANCE| \ PVR2_TRACE_TRAP| \ 0) int pvrusb2_debug = DEFAULT_DEBUG_MASK; module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "Debug trace mask"); static void pvr_setup_attach(struct pvr2_context *pvr) { /* Create association with v4l layer */ pvr2_v4l2_create(pvr); #ifdef CONFIG_VIDEO_PVRUSB2_DVB /* Create association with dvb layer */ pvr2_dvb_create(pvr); #endif pvr2_sysfs_create(pvr); } static int pvr_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct pvr2_context *pvr; /* Create underlying hardware interface */ pvr = pvr2_context_create(intf,devid,pvr_setup_attach); if (!pvr) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to create hdw handler"); return -ENOMEM; } pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); usb_set_intfdata(intf, pvr); return 0; } /* * pvr_disconnect() * */ static void pvr_disconnect(struct usb_interface *intf) { struct pvr2_context *pvr = usb_get_intfdata(intf); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); usb_set_intfdata (intf, NULL); pvr2_context_disconnect(pvr); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); } static struct usb_driver pvr_driver = { .name = "pvrusb2", .id_table = pvr2_device_table, .probe = pvr_probe, .disconnect = pvr_disconnect }; /* * pvr_init() / pvr_exit() * * This code is run to initialize/exit the driver. * */ static int __init pvr_init(void) { int ret; pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); ret = pvr2_context_global_init(); if (ret != 0) { pvr2_trace(PVR2_TRACE_INIT,"pvr_init failure code=%d",ret); return ret; } pvr2_sysfs_class_create(); ret = usb_register(&pvr_driver); if (ret == 0) pr_info("pvrusb2: " DRIVER_VERSION ":" DRIVER_DESC "\n"); if (pvrusb2_debug) pr_info("pvrusb2: Debug mask is %d (0x%x)\n", pvrusb2_debug,pvrusb2_debug); pvr2_trace(PVR2_TRACE_INIT,"pvr_init complete"); return ret; } static void __exit pvr_exit(void) { pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); usb_deregister(&pvr_driver); pvr2_context_global_done(); pvr2_sysfs_class_destroy(); pvr2_trace(PVR2_TRACE_INIT,"pvr_exit complete"); } module_init(pvr_init); module_exit(pvr_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION("0.9.1"); |
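/*
 * pvrusb2 keeps an explicit pvr_init()/pvr_exit() pair because it has extra
 * global setup (context init, sysfs class) around usb_register(). Drivers with
 * no such setup, like pn533_usb and dibusb_mc above, use module_usb_driver()
 * instead. The sketch below shows roughly what that helper generates for a
 * hypothetical driver "example_driver"; it is an illustration, not the
 * kernel's exact macro text.
 */
#if 0	/* illustrative only */
static int __init example_driver_init(void)
{
	return usb_register(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	usb_deregister(&example_driver);
}
module_exit(example_driver_exit);

/* ...which is what "module_usb_driver(example_driver);" boils down to. */
#endif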
// SPDX-License-Identifier: GPL-2.0-or-later /* * USB-to-WWAN Driver for Sierra Wireless modems * * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer * <linux@sierrawireless.com> * * Portions of this based on the cdc_ether driver by David Brownell (2003-2005) * and Ole Andre Vadla Ravnas (ActiveSync) (2006). * * IMPORTANT DISCLAIMER: This driver is not commercially supported by * Sierra Wireless. Use at your own risk. */ #define DRIVER_VERSION "v.2.0" #define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer" #define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems" static const char driver_name[] = "sierra_net"; /* if defined debug messages enabled */ /*#define DEBUG*/ #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <net/ip.h> #include <net/udp.h> #include <linux/unaligned.h> #include <linux/usb/usbnet.h> #define SWI_USB_REQUEST_GET_FW_ATTR 0x06 #define SWI_GET_FW_ATTR_MASK 0x08 /* atomic counter partially included in MAC address to make sure 2 devices * do not end up with the same MAC - concept breaks in case of > 255 ifaces */ static atomic_t iface_counter = ATOMIC_INIT(0); /* * SYNC Timer Delay definition used to set the expiry time */ #define SIERRA_NET_SYNCDELAY (2*HZ) /* Max. MTU supported. The modem buffers are limited to 1500 */ #define SIERRA_NET_MAX_SUPPORTED_MTU 1500 /* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control * message reception ... and thus the max. received packet.
* (May be the cause for parse_hip returning -EINVAL) */ #define SIERRA_NET_USBCTL_BUF_LEN 1024 /* Overriding the default usbnet rx_urb_size */ #define SIERRA_NET_RX_URB_SIZE (8 * 1024) /* Private data structure */ struct sierra_net_data { u16 link_up; /* air link up or down */ u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ u8 sync_msg[4]; /* SYNC message */ u8 shdwn_msg[4]; /* Shutdown message */ /* Backpointer to the container */ struct usbnet *usbnet; u8 ifnum; /* interface number */ /* Bit masks, must be a power of 2 */ #define SIERRA_NET_EVENT_RESP_AVAIL 0x01 #define SIERRA_NET_TIMER_EXPIRY 0x02 unsigned long kevent_flags; struct work_struct sierra_net_kevent; struct timer_list sync_timer; /* For retrying SYNC sequence */ }; struct param { int is_present; union { void *ptr; u32 dword; u16 word; u8 byte; }; }; /* HIP message type */ #define SIERRA_NET_HIP_EXTENDEDID 0x7F #define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */ #define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */ #define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */ #define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */ #define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202 #define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002 /* 3G UMTS Link Sense Indication definitions */ #define SIERRA_NET_HIP_LSI_UMTSID 0x78 /* Reverse Channel Grant Indication HIP message */ #define SIERRA_NET_HIP_RCGI 0x64 /* LSI Protocol types */ #define SIERRA_NET_PROTOCOL_UMTS 0x01 #define SIERRA_NET_PROTOCOL_UMTS_DS 0x04 /* LSI Coverage */ #define SIERRA_NET_COVERAGE_NONE 0x00 #define SIERRA_NET_COVERAGE_NOPACKET 0x01 /* LSI Session */ #define SIERRA_NET_SESSION_IDLE 0x00 /* LSI Link types */ #define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00 #define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02 struct lsi_umts { u8 protocol; u8 unused1; __be16 length; /* eventually use a union for the rest - assume umts for now */ u8 coverage; u8 network_len; /* network name len */ u8 network[40]; /* network name (UCS2, bigendian) */ u8 session_state; u8 unused3[33]; } __packed; struct lsi_umts_single { struct lsi_umts lsi; u8 link_type; u8 pdp_addr_len; /* NW-supplied PDP address len */ u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ u8 unused4[23]; u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */ u8 dns1_addr[16]; /* NW-supplied 1st DNS address */ u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */ u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/ u8 wins1_addr_len; /* NW-supplied 1st Wins address len */ u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/ u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */ u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */ u8 unused5[4]; u8 gw_addr_len; /* NW-supplied GW address len */ u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ u8 reserved[8]; } __packed; struct lsi_umts_dual { struct lsi_umts lsi; u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */ u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */ u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */ u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */ u8 unused4[23]; u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */ u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */ u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */ u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/ u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */ u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */ u8 dns2_addr6_len; 
/* NW-supplied 2nd DNS v6 address len */ u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/ u8 unused5[68]; } __packed; #define SIERRA_NET_LSI_COMMON_LEN 4 #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single)) #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) #define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual)) #define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \ (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN) /* Our own net device operations structure */ static const struct net_device_ops sierra_net_device_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* get private data associated with passed in usbnet device */ static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev) { return (struct sierra_net_data *)dev->data[0]; } /* set private data associated with passed in usbnet device */ static inline void sierra_net_set_private(struct usbnet *dev, struct sierra_net_data *priv) { dev->data[0] = (unsigned long)priv; } /* is packet IPv4/IPv6 */ static inline int is_ip(struct sk_buff *skb) { return skb->protocol == cpu_to_be16(ETH_P_IP) || skb->protocol == cpu_to_be16(ETH_P_IPV6); } /* * check passed in packet and make sure that: * - it is linear (no scatter/gather) * - it is ethernet (mac_header properly set) */ static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev) { skb_reset_mac_header(skb); /* ethernet header */ if (skb_is_nonlinear(skb)) { netdev_err(dev->net, "Non linear buffer-dropping\n"); return 0; } if (!pskb_may_pull(skb, ETH_HLEN)) return 0; skb->protocol = eth_hdr(skb)->h_proto; return 1; } static const u8 *save16bit(struct param *p, const u8 *datap) { p->is_present = 1; p->word = get_unaligned_be16(datap); return datap + sizeof(p->word); } static const u8 *save8bit(struct param *p, const u8 *datap) { p->is_present = 1; p->byte = *datap; return datap + sizeof(p->byte); } /*----------------------------------------------------------------------------* * BEGIN HIP * *----------------------------------------------------------------------------*/ /* HIP header */ #define SIERRA_NET_HIP_HDR_LEN 4 /* Extended HIP header */ #define SIERRA_NET_HIP_EXT_HDR_LEN 6 struct hip_hdr { int hdrlen; struct param payload_len; struct param msgid; struct param msgspecific; struct param extmsgid; }; static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh) { const u8 *curp = buf; int padded; if (buflen < SIERRA_NET_HIP_HDR_LEN) return -EPROTO; curp = save16bit(&hh->payload_len, curp); curp = save8bit(&hh->msgid, curp); curp = save8bit(&hh->msgspecific, curp); padded = hh->msgid.byte & 0x80; hh->msgid.byte &= 0x7F; /* 7 bits */ hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID); if (hh->extmsgid.is_present) { if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN) return -EPROTO; hh->payload_len.word &= 0x3FFF; /* 14 bits */ curp = save16bit(&hh->extmsgid, curp); hh->extmsgid.word &= 0x03FF; /* 10 bits */ hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN; } else { hh->payload_len.word &= 0x07FF; /* 11 bits */ hh->hdrlen = SIERRA_NET_HIP_HDR_LEN; } if (padded) { hh->hdrlen++; hh->payload_len.word--; } /* if real packet shorter than the claimed length */ if (buflen < (hh->hdrlen + hh->payload_len.word)) return -EINVAL; return 0; } static 
void build_hip(u8 *buf, const u16 payloadlen, struct sierra_net_data *priv) { /* the following doesn't have the full functionality. We * currently build only one kind of header, so it is faster this way */ put_unaligned_be16(payloadlen, buf); memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template)); } /*----------------------------------------------------------------------------* * END HIP * *----------------------------------------------------------------------------*/ static int sierra_net_send_cmd(struct usbnet *dev, u8 *cmd, int cmdlen, const char * cmd_name) { struct sierra_net_data *priv = sierra_net_get_private(dev); int status; status = usbnet_write_cmd(dev, USB_CDC_SEND_ENCAPSULATED_COMMAND, USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, priv->ifnum, cmd, cmdlen); if (status != cmdlen && status != -ENODEV) netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status); return status; } static int sierra_net_send_sync(struct usbnet *dev) { int status; struct sierra_net_data *priv = sierra_net_get_private(dev); dev_dbg(&dev->udev->dev, "%s", __func__); status = sierra_net_send_cmd(dev, priv->sync_msg, sizeof(priv->sync_msg), "SYNC"); return status; } static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix) { dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix); priv->tx_hdr_template[0] = 0x3F; priv->tx_hdr_template[1] = ctx_ix; *((__be16 *)&priv->tx_hdr_template[2]) = cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID); } static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) { struct lsi_umts *lsi = (struct lsi_umts *)data; u32 expected_length; if (datalen < sizeof(struct lsi_umts_single)) { netdev_err(dev->net, "%s: Data length %d, exp >= %zu\n", __func__, datalen, sizeof(struct lsi_umts_single)); return -1; } /* Validate the session state */ if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { netdev_err(dev->net, "Session idle, 0x%02x\n", lsi->session_state); return 0; } /* Validate the protocol - only support UMTS for now */ if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) { struct lsi_umts_single *single = (struct lsi_umts_single *)lsi; /* Validate the link type */ if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 && single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) { netdev_err(dev->net, "Link type unsupported: 0x%02x\n", single->link_type); return -1; } expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN; } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) { expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN; } else { netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", lsi->protocol); return -1; } if (be16_to_cpu(lsi->length) != expected_length) { netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", __func__, be16_to_cpu(lsi->length), expected_length); return -1; } /* Validate the coverage */ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); return 0; } /* Set link_sense true */ return 1; } static void sierra_net_handle_lsi(struct usbnet *dev, char *data, struct hip_hdr *hh) { struct sierra_net_data *priv = sierra_net_get_private(dev); int link_up; link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen, hh->payload_len.word); if (link_up < 0) { netdev_err(dev->net, "Invalid LSI\n"); return; } if (link_up) { sierra_net_set_ctx_index(priv, hh->msgspecific.byte); priv->link_up = 1; } else { priv->link_up = 0; } usbnet_link_change(dev, link_up, 0); } static void sierra_net_dosync(struct usbnet 
*dev) { int status; struct sierra_net_data *priv = sierra_net_get_private(dev); dev_dbg(&dev->udev->dev, "%s", __func__); /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the * firmware restart itself. After restarting, the modem will respond * with the SIERRA_NET_HIP_RESTART_ID indication. The driver continues * sending MSYNC commands every few seconds until it receives the * RESTART event from the firmware */ /* tell modem we are ready */ status = sierra_net_send_sync(dev); if (status < 0) netdev_err(dev->net, "Send SYNC failed, status %d\n", status); status = sierra_net_send_sync(dev); if (status < 0) netdev_err(dev->net, "Send SYNC failed, status %d\n", status); /* Now, start a timer and make sure we get the Restart Indication */ priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY; add_timer(&priv->sync_timer); } static void sierra_net_kevent(struct work_struct *work) { struct sierra_net_data *priv = container_of(work, struct sierra_net_data, sierra_net_kevent); struct usbnet *dev = priv->usbnet; int len; int err; u8 *buf; u8 ifnum; if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) { clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags); /* Query the modem for the LSI message */ buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); if (!buf) return; ifnum = priv->ifnum; len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_CDC_GET_ENCAPSULATED_RESPONSE, USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN, USB_CTRL_SET_TIMEOUT); if (len < 0) { netdev_err(dev->net, "usb_control_msg failed, status %d\n", len); } else { struct hip_hdr hh; dev_dbg(&dev->udev->dev, "%s: Received status message," " %04x bytes", __func__, len); err = parse_hip(buf, len, &hh); if (err) { netdev_err(dev->net, "%s: Bad packet," " parse result %d\n", __func__, err); kfree(buf); return; } /* Validate packet length */ if (len != hh.hdrlen + hh.payload_len.word) { netdev_err(dev->net, "%s: Bad packet, received" " %d, expected %d\n", __func__, len, hh.hdrlen + hh.payload_len.word); kfree(buf); return; } /* Switch on received message types */ switch (hh.msgid.byte) { case SIERRA_NET_HIP_LSI_UMTSID: dev_dbg(&dev->udev->dev, "LSI for ctx:%d", hh.msgspecific.byte); sierra_net_handle_lsi(dev, buf, &hh); break; case SIERRA_NET_HIP_RESTART_ID: dev_dbg(&dev->udev->dev, "Restart reported: %d," " stopping sync timer", hh.msgspecific.byte); /* Got sync resp - stop timer & clear mask */ timer_delete_sync(&priv->sync_timer); clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); break; case SIERRA_NET_HIP_HSYNC_ID: dev_dbg(&dev->udev->dev, "SYNC received"); err = sierra_net_send_sync(dev); if (err < 0) netdev_err(dev->net, "Send SYNC failed %d\n", err); break; case SIERRA_NET_HIP_EXTENDEDID: netdev_err(dev->net, "Unrecognized HIP msg, " "extmsgid 0x%04x\n", hh.extmsgid.word); break; case SIERRA_NET_HIP_RCGI: /* Ignored */ break; default: netdev_err(dev->net, "Unrecognized HIP msg, " "msgid 0x%02x\n", hh.msgid.byte); break; } } kfree(buf); } /* The sync timer bit might be set */ if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) { clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); dev_dbg(&dev->udev->dev, "Deferred sync timer expiry"); sierra_net_dosync(priv->usbnet); } if (priv->kevent_flags) dev_dbg(&dev->udev->dev, "sierra_net_kevent done, " "kevent_flags = 0x%lx", priv->kevent_flags); } static void sierra_net_defer_kevent(struct usbnet *dev, int work) { struct sierra_net_data *priv = sierra_net_get_private(dev); 
set_bit(work, &priv->kevent_flags); schedule_work(&priv->sierra_net_kevent); } /* * Sync Retransmit Timer Handler. On expiry, kick the work queue */ static void sierra_sync_timer(struct timer_list *t) { struct sierra_net_data *priv = timer_container_of(priv, t, sync_timer); struct usbnet *dev = priv->usbnet; dev_dbg(&dev->udev->dev, "%s", __func__); /* Kick the tasklet */ sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY); } static void sierra_net_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; dev_dbg(&dev->udev->dev, "%s", __func__); if (urb->actual_length < sizeof *event) return; /* Add cases to handle other standard notifications. */ event = urb->transfer_buffer; switch (event->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: case USB_CDC_NOTIFY_SPEED_CHANGE: /* USB 305 sends those */ break; case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL); break; default: netdev_err(dev->net, ": unexpected notification %02x!\n", event->bNotificationType); break; } } static void sierra_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); strscpy(info->driver, driver_name, sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); } static u32 sierra_net_get_link(struct net_device *net) { struct usbnet *dev = netdev_priv(net); /* Report link is down whenever the interface is down */ return sierra_net_get_private(dev)->link_up && netif_running(net); } static const struct ethtool_ops sierra_net_ethtool_ops = { .get_drvinfo = sierra_net_get_drvinfo, .get_link = sierra_net_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .nway_reset = usbnet_nway_reset, .get_link_ksettings = usbnet_get_link_ksettings_mii, .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) { int result = 0; __le16 attrdata; result = usbnet_read_cmd(dev, /* _u8 vendor specific request */ SWI_USB_REQUEST_GET_FW_ATTR, USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ 0x0000, /* __u16 value not used */ 0x0000, /* __u16 index not used */ &attrdata, /* char *data */ sizeof(attrdata) /* __u16 size */ ); if (result < 0) return -EIO; *datap = le16_to_cpu(attrdata); return result; } /* * collects the bulk endpoints, the status endpoint. 
*/ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) { u8 ifacenum; u8 numendpoints; u16 fwattr = 0; int status; struct sierra_net_data *priv; static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; u8 mod[2]; dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; numendpoints = intf->cur_altsetting->desc.bNumEndpoints; /* We have three endpoints, bulk in and out, and a status */ if (numendpoints != 3) { dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d", numendpoints); return -ENODEV; } /* Status endpoint set in usbnet_get_endpoints() */ dev->status = NULL; status = usbnet_get_endpoints(dev, intf); if (status < 0) { dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)", status); return -ENODEV; } /* Initialize sierra private data */ priv = kzalloc(sizeof *priv, GFP_KERNEL); if (!priv) return -ENOMEM; priv->usbnet = dev; priv->ifnum = ifacenum; dev->net->netdev_ops = &sierra_net_device_ops; /* change MAC addr to include, ifacenum, and to be unique */ mod[0] = atomic_inc_return(&iface_counter); mod[1] = ifacenum; dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2); /* prepare shutdown message template */ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); /* set context index initially to 0 - prepares tx hdr template */ sierra_net_set_ctx_index(priv, 0); /* prepare sync message template */ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE); dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; dev->net->max_mtu = SIERRA_NET_MAX_SUPPORTED_MTU; /* Set up the netdev */ dev->net->flags |= IFF_NOARP; dev->net->ethtool_ops = &sierra_net_ethtool_ops; netif_carrier_off(dev->net); sierra_net_set_private(dev, priv); priv->kevent_flags = 0; /* Use the shared workqueue */ INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent); /* Only need to do this once */ timer_setup(&priv->sync_timer, sierra_sync_timer, 0); /* verify fw attributes */ status = sierra_net_get_fw_attr(dev, &fwattr); dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr); /* test whether firmware supports DHCP */ if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) { /* found incompatible firmware version */ dev_err(&dev->udev->dev, "Incompatible driver and firmware" " versions\n"); kfree(priv); return -ENODEV; } return 0; } static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) { int status; struct sierra_net_data *priv = sierra_net_get_private(dev); dev_dbg(&dev->udev->dev, "%s", __func__); /* kill the timer and work */ timer_shutdown_sync(&priv->sync_timer); cancel_work_sync(&priv->sierra_net_kevent); /* tell modem we are going away */ status = sierra_net_send_cmd(dev, priv->shdwn_msg, sizeof(priv->shdwn_msg), "Shutdown"); if (status < 0) netdev_err(dev->net, "usb_control_msg failed, status %d\n", status); usbnet_status_stop(dev); sierra_net_set_private(dev, NULL); kfree(priv); } static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev, struct sk_buff *skb, int len) { struct sk_buff *new_skb; /* clone skb */ new_skb = skb_clone(skb, GFP_ATOMIC); /* remove len bytes from original */ 
skb_pull(skb, len); /* trim next packet to it's length */ if (new_skb) { skb_trim(new_skb, len); } else { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "failed to get skb\n"); dev->net->stats.rx_dropped++; } return new_skb; } /* ---------------------------- Receive data path ----------------------*/ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { int err; struct hip_hdr hh; struct sk_buff *new_skb; dev_dbg(&dev->udev->dev, "%s", __func__); /* could contain multiple packets */ while (likely(skb->len)) { err = parse_hip(skb->data, skb->len, &hh); if (err) { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "Invalid HIP header %d\n", err); /* dev->net->stats.rx_errors incremented by caller */ dev->net->stats.rx_length_errors++; return 0; } /* Validate Extended HIP header */ if (!hh.extmsgid.is_present || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); dev->net->stats.rx_frame_errors++; /* dev->net->stats.rx_errors incremented by caller */ return 0; } skb_pull(skb, hh.hdrlen); /* We are going to accept this packet, prepare it. * In case protocol is IPv6, keep it, otherwise force IPv4. */ skb_reset_mac_header(skb); if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); eth_zero_addr(eth_hdr(skb)->h_source); memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); /* Last packet in batch handled by usbnet */ if (hh.payload_len.word == skb->len) return 1; new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word); if (new_skb) usbnet_skb_return(dev, new_skb); } /* while */ return 0; } /* ---------------------------- Transmit data path ----------------------*/ static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct sierra_net_data *priv = sierra_net_get_private(dev); u16 len; bool need_tail; BUILD_BUG_ON(sizeof_field(struct usbnet, data) < sizeof(struct cdc_state)); dev_dbg(&dev->udev->dev, "%s", __func__); if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) { /* enough head room as is? 
*/ if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) { /* Save the Eth/IP length and set up HIP hdr */ len = skb->len; skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN); /* Handle ZLP issue */ need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) % dev->maxpacket == 0); if (need_tail) { if (unlikely(skb_tailroom(skb) == 0)) { netdev_err(dev->net, "tx_fixup:" "no room for packet\n"); dev_kfree_skb_any(skb); return NULL; } else { skb->data[skb->len] = 0; __skb_put(skb, 1); len = len + 1; } } build_hip(skb->data, len, priv); return skb; } else { /* * compensate in the future if necessary */ netdev_err(dev->net, "tx_fixup: no room for HIP\n"); } /* headroom */ } if (!priv->link_up) dev->net->stats.tx_carrier_errors++; /* tx_dropped incremented by usbnet */ /* filter the packet out, release it */ dev_kfree_skb_any(skb); return NULL; } static const struct driver_info sierra_net_info_direct_ip = { .description = "Sierra Wireless USB-to-WWAN Modem", .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = sierra_net_bind, .unbind = sierra_net_unbind, .status = sierra_net_status, .rx_fixup = sierra_net_rx_fixup, .tx_fixup = sierra_net_tx_fixup, }; static int sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod) { int ret; ret = usbnet_probe(udev, prod); if (ret == 0) { struct usbnet *dev = usb_get_intfdata(udev); ret = usbnet_status_start(dev, GFP_KERNEL); if (ret == 0) { /* Interrupt URB now set up; initiate sync sequence */ sierra_net_dosync(dev); } } return ret; } #define DIRECT_IP_DEVICE(vend, prod) \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip} static const struct usb_device_id products[] = { DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ {}, /* last item */ }; MODULE_DEVICE_TABLE(usb, products); /* We are based on usbnet, so let it handle the USB driver specifics */ static struct usb_driver sierra_net_driver = { .name = "sierra_net", .id_table = products, .probe = sierra_net_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .no_dynamic_id = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(sierra_net_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); |
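/*
 * Editor's illustrative sketch (not part of the original driver): the
 * zero-length-packet avoidance arithmetic used by sierra_net_tx_fixup()
 * above.  When the HIP-framed length is an exact multiple of the bulk-OUT
 * endpoint's wMaxPacketSize, the driver appends one pad byte so the USB
 * transfer never ends exactly on a packet boundary.  The helper name and
 * parameters below are invented for illustration only.
 */
static inline bool sierra_example_needs_pad_byte(unsigned int eth_len,
						 unsigned int hip_hdr_len,
						 unsigned int maxpacket)
{
	/* mirrors: need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) %
	 *                       dev->maxpacket == 0);
	 */
	return ((eth_len + hip_hdr_len) % maxpacket) == 0;
}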
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM spi #if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SPI_H #include <linux/ktime.h> #include <linux/tracepoint.h> DECLARE_EVENT_CLASS(spi_controller, TP_PROTO(struct spi_controller *controller), TP_ARGS(controller), TP_STRUCT__entry( __field( int, bus_num ) ), TP_fast_assign( __entry->bus_num = controller->bus_num; ), TP_printk("spi%d", (int)__entry->bus_num) ); DEFINE_EVENT(spi_controller, spi_controller_idle, TP_PROTO(struct spi_controller *controller), TP_ARGS(controller) ); DEFINE_EVENT(spi_controller, spi_controller_busy, TP_PROTO(struct spi_controller *controller), TP_ARGS(controller) ); TRACE_EVENT(spi_setup, TP_PROTO(struct spi_device *spi, int status), TP_ARGS(spi, status), TP_STRUCT__entry( __field(int, bus_num) __field(int, chip_select) __field(unsigned long, mode) __field(unsigned int, bits_per_word) __field(unsigned int, max_speed_hz) __field(int, status) ), TP_fast_assign( __entry->bus_num = spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(spi, 0); __entry->mode = spi->mode; __entry->bits_per_word = spi->bits_per_word; __entry->max_speed_hz = spi->max_speed_hz; __entry->status = status; ), TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d", __entry->bus_num, __entry->chip_select, (__entry->mode & SPI_MODE_X_MASK), (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "", (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "", (__entry->mode & SPI_3WIRE) ? "3wire, " : "", (__entry->mode & SPI_LOOP) ? "loopback, " : "", __entry->bits_per_word, __entry->max_speed_hz, __entry->status) ); TRACE_EVENT(spi_set_cs, TP_PROTO(struct spi_device *spi, bool enable), TP_ARGS(spi, enable), TP_STRUCT__entry( __field(int, bus_num) __field(int, chip_select) __field(unsigned long, mode) __field(bool, enable) ), TP_fast_assign( __entry->bus_num = spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(spi, 0); __entry->mode = spi->mode; __entry->enable = enable; ), TP_printk("spi%d.%d %s%s", __entry->bus_num, __entry->chip_select, __entry->enable ? "activate" : "deactivate", (__entry->mode & SPI_CS_HIGH) ?
", cs_high" : "") ); DECLARE_EVENT_CLASS(spi_message, TP_PROTO(struct spi_message *msg), TP_ARGS(msg), TP_STRUCT__entry( __field( int, bus_num ) __field( int, chip_select ) __field( struct spi_message *, msg ) ), TP_fast_assign( __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(msg->spi, 0); __entry->msg = msg; ), TP_printk("spi%d.%d %p", (int)__entry->bus_num, (int)__entry->chip_select, (struct spi_message *)__entry->msg) ); DEFINE_EVENT(spi_message, spi_message_submit, TP_PROTO(struct spi_message *msg), TP_ARGS(msg) ); DEFINE_EVENT(spi_message, spi_message_start, TP_PROTO(struct spi_message *msg), TP_ARGS(msg) ); TRACE_EVENT(spi_message_done, TP_PROTO(struct spi_message *msg), TP_ARGS(msg), TP_STRUCT__entry( __field( int, bus_num ) __field( int, chip_select ) __field( struct spi_message *, msg ) __field( unsigned, frame ) __field( unsigned, actual ) ), TP_fast_assign( __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(msg->spi, 0); __entry->msg = msg; __entry->frame = msg->frame_length; __entry->actual = msg->actual_length; ), TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num, (int)__entry->chip_select, (struct spi_message *)__entry->msg, (unsigned)__entry->actual, (unsigned)__entry->frame) ); /* * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer * that only exist to work with controllers that have SPI_CONTROLLER_MUST_TX or * SPI_CONTROLLER_MUST_RX. */ #define spi_valid_txbuf(msg, xfer) \ (xfer->tx_buf && xfer->tx_buf != msg->spi->controller->dummy_tx) #define spi_valid_rxbuf(msg, xfer) \ (xfer->rx_buf && xfer->rx_buf != msg->spi->controller->dummy_rx) DECLARE_EVENT_CLASS(spi_transfer, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), TP_ARGS(msg, xfer), TP_STRUCT__entry( __field( int, bus_num ) __field( int, chip_select ) __field( struct spi_transfer *, xfer ) __field( int, len ) __dynamic_array(u8, rx_buf, spi_valid_rxbuf(msg, xfer) ? (xfer->len < 64 ? xfer->len : 64) : 0) __dynamic_array(u8, tx_buf, spi_valid_txbuf(msg, xfer) ? (xfer->len < 64 ? xfer->len : 64) : 0) ), TP_fast_assign( __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(msg->spi, 0); __entry->xfer = xfer; __entry->len = xfer->len; if (spi_valid_txbuf(msg, xfer)) memcpy(__get_dynamic_array(tx_buf), xfer->tx_buf, __get_dynamic_array_len(tx_buf)); if (spi_valid_rxbuf(msg, xfer)) memcpy(__get_dynamic_array(rx_buf), xfer->rx_buf, __get_dynamic_array_len(rx_buf)); ), TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]", __entry->bus_num, __entry->chip_select, __entry->xfer, __entry->len, __get_dynamic_array_len(tx_buf), __get_dynamic_array(tx_buf), __get_dynamic_array_len(rx_buf), __get_dynamic_array(rx_buf)) ); DEFINE_EVENT(spi_transfer, spi_transfer_start, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), TP_ARGS(msg, xfer) ); DEFINE_EVENT(spi_transfer, spi_transfer_stop, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), TP_ARGS(msg, xfer) ); #endif /* _TRACE_POWER_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
// SPDX-License-Identifier: GPL-2.0-or-later /* V4L2 device support. Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl> */ #include <linux/types.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) { if (v4l2_dev == NULL) return -EINVAL; INIT_LIST_HEAD(&v4l2_dev->subdevs); spin_lock_init(&v4l2_dev->lock); v4l2_prio_init(&v4l2_dev->prio); kref_init(&v4l2_dev->ref); get_device(dev); v4l2_dev->dev = dev; if (dev == NULL) { /* If dev == NULL, then name must be filled in by the caller */ if (WARN_ON(!v4l2_dev->name[0])) return -EINVAL; return 0; } /* Set name to driver name + device name if it is empty. */ if (!v4l2_dev->name[0]) snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s", dev->driver->name, dev_name(dev)); if (!dev_get_drvdata(dev)) dev_set_drvdata(dev, v4l2_dev); return 0; } EXPORT_SYMBOL_GPL(v4l2_device_register); static void v4l2_device_release(struct kref *ref) { struct v4l2_device *v4l2_dev = container_of(ref, struct v4l2_device, ref); if (v4l2_dev->release) v4l2_dev->release(v4l2_dev); } int v4l2_device_put(struct v4l2_device *v4l2_dev) { return kref_put(&v4l2_dev->ref, v4l2_device_release); } EXPORT_SYMBOL_GPL(v4l2_device_put); int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, atomic_t *instance) { int num = atomic_inc_return(instance) - 1; int len = strlen(basename); if (basename[len - 1] >= '0' && basename[len - 1] <= '9') snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s-%d", basename, num); else snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s%d", basename, num); return num; } EXPORT_SYMBOL_GPL(v4l2_device_set_name); void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) { if (v4l2_dev->dev == NULL) return; if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) dev_set_drvdata(v4l2_dev->dev, NULL); put_device(v4l2_dev->dev); v4l2_dev->dev = NULL; } EXPORT_SYMBOL_GPL(v4l2_device_disconnect); void v4l2_device_unregister(struct v4l2_device *v4l2_dev) { struct v4l2_subdev *sd, *next; /* Just return if v4l2_dev is NULL or if it was already * unregistered before.
*/ if (v4l2_dev == NULL || !v4l2_dev->name[0]) return; v4l2_device_disconnect(v4l2_dev); /* Unregister subdevs */ list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) { v4l2_device_unregister_subdev(sd); if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) v4l2_i2c_subdev_unregister(sd); else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) v4l2_spi_subdev_unregister(sd); } /* Mark as unregistered, thus preventing duplicate unregistrations */ v4l2_dev->name[0] = '\0'; } EXPORT_SYMBOL_GPL(v4l2_device_unregister); int __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, struct v4l2_subdev *sd, struct module *module) { int err; /* Check for valid input */ if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0]) return -EINVAL; /* * The reason to acquire the module here is to avoid unloading * a module of sub-device which is registered to a media * device. To make it possible to unload modules for media * devices that also register sub-devices, do not * try_module_get() such sub-device owners. */ sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver && module == v4l2_dev->dev->driver->owner; if (!sd->owner_v4l2_dev && !try_module_get(module)) return -ENODEV; sd->v4l2_dev = v4l2_dev; /* This just returns 0 if either of the two args is NULL */ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL, true); if (err) goto error_module; #if defined(CONFIG_MEDIA_CONTROLLER) /* Register the entity. */ if (v4l2_dev->mdev) { err = media_device_register_entity(v4l2_dev->mdev, &sd->entity); if (err < 0) goto error_module; } #endif if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); if (err) goto error_unregister; } sd->owner = module; spin_lock(&v4l2_dev->lock); list_add_tail(&sd->list, &v4l2_dev->subdevs); spin_unlock(&v4l2_dev->lock); return 0; error_unregister: #if defined(CONFIG_MEDIA_CONTROLLER) media_device_unregister_entity(&sd->entity); #endif error_module: if (!sd->owner_v4l2_dev) module_put(sd->owner); sd->v4l2_dev = NULL; return err; } EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev); static void v4l2_subdev_release(struct v4l2_subdev *sd) { struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL; if (sd->internal_ops && sd->internal_ops->release) sd->internal_ops->release(sd); sd->devnode = NULL; module_put(owner); } static void v4l2_device_release_subdev_node(struct video_device *vdev) { v4l2_subdev_release(video_get_drvdata(vdev)); kfree(vdev); } int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, bool read_only) { struct video_device *vdev; struct v4l2_subdev *sd; int err; /* Register a device node for every subdev marked with the * V4L2_SUBDEV_FL_HAS_DEVNODE flag. 
*/ list_for_each_entry(sd, &v4l2_dev->subdevs, list) { if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) continue; if (sd->devnode) continue; vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); if (!vdev) { err = -ENOMEM; goto clean_up; } video_set_drvdata(vdev, sd); strscpy(vdev->name, sd->name, sizeof(vdev->name)); vdev->dev_parent = sd->dev; vdev->v4l2_dev = v4l2_dev; vdev->fops = &v4l2_subdev_fops; vdev->release = v4l2_device_release_subdev_node; vdev->ctrl_handler = sd->ctrl_handler; if (read_only) set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags); sd->devnode = vdev; err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1, sd->owner); if (err < 0) { sd->devnode = NULL; kfree(vdev); goto clean_up; } #if defined(CONFIG_MEDIA_CONTROLLER) sd->entity.info.dev.major = VIDEO_MAJOR; sd->entity.info.dev.minor = vdev->minor; /* Interface is created by __video_register_device() */ if (vdev->v4l2_dev->mdev) { struct media_link *link; link = media_create_intf_link(&sd->entity, &vdev->intf_devnode->intf, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (!link) { err = -ENOMEM; goto clean_up; } } #endif } return 0; clean_up: list_for_each_entry(sd, &v4l2_dev->subdevs, list) { if (!sd->devnode) break; video_unregister_device(sd->devnode); } return err; } EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes); void v4l2_device_unregister_subdev(struct v4l2_subdev *sd) { struct v4l2_device *v4l2_dev; /* return if it isn't registered */ if (sd == NULL || sd->v4l2_dev == NULL) return; v4l2_dev = sd->v4l2_dev; spin_lock(&v4l2_dev->lock); list_del(&sd->list); spin_unlock(&v4l2_dev->lock); if (sd->internal_ops && sd->internal_ops->unregistered) sd->internal_ops->unregistered(sd); sd->v4l2_dev = NULL; #if defined(CONFIG_MEDIA_CONTROLLER) if (v4l2_dev->mdev) { /* * No need to explicitly remove links, as both pads and * links are removed by the function below, in the right order */ media_device_unregister_entity(&sd->entity); } #endif if (sd->devnode) video_unregister_device(sd->devnode); else v4l2_subdev_release(sd); } EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev); |
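/*
 * Editor's note: a minimal sketch of the registration sequence a bridge
 * driver typically follows against the API implemented above.  It assumes
 * the usual v4l2_device_register_subdev()/v4l2_device_register_subdev_nodes()
 * wrapper macros from <media/v4l2-device.h>; my_probe_example() and the
 * sub-device pointer are invented names, and error handling is trimmed.
 */
#include <media/v4l2-device.h>

static struct v4l2_device example_v4l2_dev;

static int my_probe_example(struct device *dev, struct v4l2_subdev *sensor_sd)
{
	int ret;

	ret = v4l2_device_register(dev, &example_v4l2_dev);
	if (ret)
		return ret;

	/* hook up a sub-device (sensor, video decoder, ...) */
	ret = v4l2_device_register_subdev(&example_v4l2_dev, sensor_sd);
	if (ret)
		goto err_unreg;

	/* create /dev/v4l-subdevN nodes for subdevs that requested one */
	ret = v4l2_device_register_subdev_nodes(&example_v4l2_dev);
	if (ret)
		goto err_unreg;

	return 0;

err_unreg:
	v4l2_device_unregister(&example_v4l2_dev);
	return ret;
}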
// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/vmalloc.h> #include <linux/crc32.h> #include <linux/firmware.h> #include <linux/kstrtox.h> #include "core.h" #include "debug.h" #include "hif.h" #include "wmi-ops.h" /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 #define ATH10K_DEBUG_CAL_DATA_LEN 12064 void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{ struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_info(ar->dev, "%pV", &vaf); trace_ath10k_log_info(ar, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_info); void ath10k_debug_print_hwfw_info(struct ath10k *ar) { const struct firmware *firmware; char fw_features[128] = {}; u32 crc = 0; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x", ar->hw_params.name, ar->target_version, ar->bus_param.chip_id, ar->id.subsystem_vendor, ar->id.subsystem_device); ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", IS_ENABLED(CONFIG_ATH10K_DEBUG), IS_ENABLED(CONFIG_ATH10K_DEBUGFS), IS_ENABLED(CONFIG_ATH10K_TRACING), IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED), IS_ENABLED(CONFIG_NL80211_TESTMODE)); firmware = ar->normal_mode_fw.fw_file.firmware; if (firmware) crc = crc32_le(0, firmware->data, firmware->size); ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n", ar->hw->wiphy->fw_version, ar->fw_api, fw_features, crc); } void ath10k_debug_print_board_info(struct ath10k *ar) { char boardinfo[100]; const struct firmware *board; u32 crc; if (ar->id.bmi_ids_valid) scnprintf(boardinfo, sizeof(boardinfo), "%d:%d", ar->id.bmi_chip_id, ar->id.bmi_board_id); else scnprintf(boardinfo, sizeof(boardinfo), "N/A"); board = ar->normal_mode_fw.board; if (!IS_ERR_OR_NULL(board)) crc = crc32_le(0, board->data, board->size); else crc = 0; ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", ar->bd_api, boardinfo, crc); } void ath10k_debug_print_boot_info(struct ath10k *ar) { ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n", ar->htt.target_version_major, ar->htt.target_version_minor, ar->normal_mode_fw.fw_file.wmi_op_version, ar->normal_mode_fw.fw_file.htt_op_version, ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)); } void ath10k_print_driver_info(struct ath10k *ar) { ath10k_debug_print_hwfw_info(ar); ath10k_debug_print_board_info(ar); ath10k_debug_print_boot_info(ar); } EXPORT_SYMBOL(ath10k_print_driver_info); void ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_err(ar->dev, "%pV", &vaf); trace_ath10k_log_err(ar, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_err); void ath10k_warn(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_warn_ratelimited(ar->dev, "%pV", &vaf); trace_ath10k_log_warn(ar, &vaf); va_end(args); } EXPORT_SYMBOL(ath10k_warn); #ifdef CONFIG_ATH10K_DEBUGFS static ssize_t ath10k_read_wmi_services(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char *buf; size_t len = 0, buf_len = 8192; const char *name; ssize_t ret_cnt; bool enabled; int i; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); for (i = 0; i < WMI_SERVICE_MAX; i++) { enabled = test_bit(i, ar->wmi.svc_map); name = wmi_service_name(i); if (!name) { if (enabled) len += scnprintf(buf + len, buf_len - len, "%-40s %s (bit %d)\n", "unknown", "enabled", i); continue; } len += scnprintf(buf + len, buf_len - len, "%-40s %s\n", name, enabled ? 
"enabled" : "-"); } spin_unlock_bh(&ar->data_lock); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); mutex_unlock(&ar->conf_mutex); kfree(buf); return ret_cnt; } static const struct file_operations fops_wmi_services = { .read = ath10k_read_wmi_services, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath10k_fw_stats_pdevs_free(struct list_head *head) { struct ath10k_fw_stats_pdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_fw_stats_vdevs_free(struct list_head *head) { struct ath10k_fw_stats_vdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_fw_stats_peers_free(struct list_head *head) { struct ath10k_fw_stats_peer *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_fw_extd_stats_peers_free(struct list_head *head) { struct ath10k_fw_extd_stats_peer *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } static void ath10k_debug_fw_stats_reset(struct ath10k *ar) { spin_lock_bh(&ar->data_lock); ar->debug.fw_stats_done = false; ar->debug.fw_stats.extended = false; ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); spin_unlock_bh(&ar->data_lock); } void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_fw_stats stats = {}; bool is_start, is_started, is_end; size_t num_peers; size_t num_vdevs; int ret; INIT_LIST_HEAD(&stats.pdevs); INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.peers); INIT_LIST_HEAD(&stats.peers_extd); spin_lock_bh(&ar->data_lock); ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); if (ret) { ath10k_warn(ar, "failed to pull fw stats: %d\n", ret); goto free; } /* Stat data may exceed htc-wmi buffer limit. In such case firmware * splits the stats data and delivers it in a ping-pong fashion of * request cmd-update event. * * However there is no explicit end-of-data. Instead start-of-data is * used as an implicit one. This works as follows: * a) discard stat update events until one with pdev stats is * delivered - this skips session started at end of (b) * b) consume stat update events until another one with pdev stats is * delivered which is treated as end-of-data and is itself discarded */ if (ath10k_peer_stats_enabled(ar)) ath10k_sta_update_rx_duration(ar, &stats); if (ar->debug.fw_stats_done) { if (!ath10k_peer_stats_enabled(ar)) ath10k_warn(ar, "received unsolicited stats update event\n"); goto free; } num_peers = list_count_nodes(&ar->debug.fw_stats.peers); num_vdevs = list_count_nodes(&ar->debug.fw_stats.vdevs); is_start = (list_empty(&ar->debug.fw_stats.pdevs) && !list_empty(&stats.pdevs)); is_end = (!list_empty(&ar->debug.fw_stats.pdevs) && !list_empty(&stats.pdevs)); if (is_start) list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); if (is_end) ar->debug.fw_stats_done = true; if (stats.extended) ar->debug.fw_stats.extended = true; is_started = !list_empty(&ar->debug.fw_stats.pdevs); if (is_started && !is_end) { if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) { /* Although this is unlikely impose a sane limit to * prevent firmware from DoS-ing the host. 
*/ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); ath10k_warn(ar, "dropping fw peer stats\n"); goto free; } if (num_vdevs >= BITS_PER_LONG) { ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_warn(ar, "dropping fw vdev stats\n"); goto free; } if (!list_empty(&stats.peers)) list_splice_tail_init(&stats.peers_extd, &ar->debug.fw_stats.peers_extd); list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); } complete(&ar->debug.fw_stats_complete); free: /* In some cases lists have been spliced and cleared. Free up * resources if that is not the case. */ ath10k_fw_stats_pdevs_free(&stats.pdevs); ath10k_fw_stats_vdevs_free(&stats.vdevs); ath10k_fw_stats_peers_free(&stats.peers); ath10k_fw_extd_stats_peers_free(&stats.peers_extd); spin_unlock_bh(&ar->data_lock); } int ath10k_debug_fw_stats_request(struct ath10k *ar) { unsigned long timeout, time_left; int ret; lockdep_assert_held(&ar->conf_mutex); timeout = jiffies + msecs_to_jiffies(1 * HZ); ath10k_debug_fw_stats_reset(ar); for (;;) { if (time_after(jiffies, timeout)) return -ETIMEDOUT; reinit_completion(&ar->debug.fw_stats_complete); ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask); if (ret) { ath10k_warn(ar, "could not request stats (%d)\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->debug.fw_stats_complete, 1 * HZ); if (!time_left) return -ETIMEDOUT; spin_lock_bh(&ar->data_lock); if (ar->debug.fw_stats_done) { spin_unlock_bh(&ar->data_lock); break; } spin_unlock_bh(&ar->data_lock); } return 0; } static int ath10k_fw_stats_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } ret = ath10k_debug_fw_stats_request(ar); if (ret) { ath10k_warn(ar, "failed to request fw stats: %d\n", ret); goto err_free; } ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf); if (ret) { ath10k_warn(ar, "failed to fill fw stats: %d\n", ret); goto err_free; } file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_fw_stats_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_fw_stats = { .open = ath10k_fw_stats_open, .release = ath10k_fw_stats_release, .read = ath10k_fw_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; size_t len = 0, buf_len = 500; char *buf; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock_bh(&ar->data_lock); len += scnprintf(buf + len, buf_len - len, "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); len += scnprintf(buf + len, buf_len - len, "fw_warm_reset_counter\t\t%d\n", ar->stats.fw_warm_reset_counter); len += scnprintf(buf + len, buf_len - len, "fw_cold_reset_counter\t\t%d\n", 
ar->stats.fw_cold_reset_counter); spin_unlock_bh(&ar->data_lock); ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret; } static const struct file_operations fops_fw_reset_stats = { .open = simple_open, .read = ath10k_debug_fw_reset_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; /* This is a clean assert crash in firmware. */ static int ath10k_debug_fw_assert(struct ath10k *ar) { struct wmi_vdev_install_key_cmd *cmd; struct sk_buff *skb; skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_install_key_cmd *)skb->data; memset(cmd, 0, sizeof(*cmd)); /* big enough number so that firmware asserts */ cmd->vdev_id = __cpu_to_le32(0x7ffe); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_install_key_cmdid); } static ssize_t ath10k_read_simulate_fw_crash(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char buf[] = "To simulate firmware crash write one of the keywords to this file:\n" "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n" "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n" "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n" "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } /* Simulate firmware crash: * 'soft': Call wmi command causing firmware hang. This firmware hang is * recoverable by warm firmware reset. * 'hard': Force firmware crash by setting any vdev parameter for not allowed * vdev id. This is hard firmware crash because it is recoverable only by cold * firmware reset. */ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32] = {0}; ssize_t rc; int ret; /* filter partial writes and invalid commands */ if (*ppos != 0 || count >= sizeof(buf) || count == 0) return -EINVAL; rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (rc < 0) return rc; /* drop the possible '\n' from the end */ if (buf[*ppos - 1] == '\n') buf[*ppos - 1] = '\0'; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } if (!strcmp(buf, "soft")) { ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); } else if (!strcmp(buf, "hard")) { ath10k_info(ar, "simulating hard firmware crash\n"); /* 0x7fff is vdev id, and it is always out of range for all * firmware variants in order to force a firmware crash. 
*/ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff, ar->wmi.vdev_param->rts_threshold, 0); } else if (!strcmp(buf, "assert")) { ath10k_info(ar, "simulating firmware assert crash\n"); ret = ath10k_debug_fw_assert(ar); } else if (!strcmp(buf, "hw-restart")) { ath10k_info(ar, "user requested hw restart\n"); ath10k_core_start_recovery(ar); ret = 0; } else { ret = -EINVAL; goto exit; } if (ret) { ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret); goto exit; } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_simulate_fw_crash = { .read = ath10k_read_simulate_fw_crash, .write = ath10k_write_simulate_fw_crash, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len; char buf[50]; len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->bus_param.chip_id); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_chip_id = { .read = ath10k_read_chip_id, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_reg_addr_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 buf[32]; size_t len = 0; u32 reg_addr; mutex_lock(&ar->conf_mutex); reg_addr = ar->debug.reg_addr; mutex_unlock(&ar->conf_mutex); len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_reg_addr_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 reg_addr; int ret; ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr); if (ret) return ret; if (!IS_ALIGNED(reg_addr, 4)) return -EFAULT; mutex_lock(&ar->conf_mutex); ar->debug.reg_addr = reg_addr; mutex_unlock(&ar->conf_mutex); return count; } static const struct file_operations fops_reg_addr = { .read = ath10k_reg_addr_read, .write = ath10k_reg_addr_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_reg_value_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 buf[48]; size_t len; u32 reg_addr, reg_val; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } reg_addr = ar->debug.reg_addr; reg_val = ath10k_hif_read32(ar, reg_addr); len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val); ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_reg_value_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 reg_addr, reg_val; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } reg_addr = ar->debug.reg_addr; ret = kstrtou32_from_user(user_buf, count, 0, &reg_val); if (ret) goto exit; ath10k_hif_write32(ar, reg_addr, reg_val); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_reg_value = { .read = ath10k_reg_value_read, .write = ath10k_reg_value_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t
ath10k_mem_value_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 *buf; int ret; if (*ppos < 0) return -EINVAL; if (!count) return 0; mutex_lock(&ar->conf_mutex); buf = vmalloc(count); if (!buf) { ret = -ENOMEM; goto exit; } if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } ret = ath10k_hif_diag_read(ar, *ppos, buf, count); if (ret) { ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n", (u32)(*ppos), ret); goto exit; } ret = copy_to_user(user_buf, buf, count); if (ret) { ret = -EFAULT; goto exit; } count -= ret; *ppos += count; ret = count; exit: vfree(buf); mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_mem_value_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u8 *buf; int ret; if (*ppos < 0) return -EINVAL; if (!count) return 0; mutex_lock(&ar->conf_mutex); buf = vmalloc(count); if (!buf) { ret = -ENOMEM; goto exit; } if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_UTF) { ret = -ENETDOWN; goto exit; } ret = copy_from_user(buf, user_buf, count); if (ret) { ret = -EFAULT; goto exit; } ret = ath10k_hif_diag_write(ar, *ppos, buf, count); if (ret) { ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n", (u32)(*ppos), ret); goto exit; } *ppos += count; ret = count; exit: vfree(buf); mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_mem_value = { .read = ath10k_mem_value_read, .write = ath10k_mem_value_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath10k_debug_htt_stats_req(struct ath10k *ar) { u64 cookie; int ret; lockdep_assert_held(&ar->conf_mutex); if (ar->debug.htt_stats_mask == 0) /* htt stats are disabled */ return 0; if (ar->state != ATH10K_STATE_ON) return 0; cookie = get_jiffies_64(); ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, ar->debug.reset_htt_stats, cookie); if (ret) { ath10k_warn(ar, "failed to send htt stats request: %d\n", ret); return ret; } queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork, msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL)); return 0; } static void ath10k_debug_htt_stats_dwork(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, debug.htt_stats_dwork.work); mutex_lock(&ar->conf_mutex); ath10k_debug_htt_stats_req(ar); mutex_unlock(&ar->conf_mutex); } static ssize_t ath10k_read_htt_stats_mask(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32]; size_t len; len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_htt_stats_mask(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long mask; int ret; ret = kstrtoul_from_user(user_buf, count, 0, &mask); if (ret) return ret; /* max 17 bit masks (for now) */ if (mask > HTT_STATS_BIT_MASK) return -E2BIG; mutex_lock(&ar->conf_mutex); ar->debug.htt_stats_mask = mask; ret = ath10k_debug_htt_stats_req(ar); if (ret) goto out; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_htt_stats_mask = { .read = ath10k_read_htt_stats_mask, .write = ath10k_write_htt_stats_mask, .open = 
simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[64]; u8 amsdu, ampdu; size_t len; mutex_lock(&ar->conf_mutex); amsdu = ar->htt.max_num_amsdu; ampdu = ar->htt.max_num_ampdu; mutex_unlock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int res; char buf[64] = {0}; unsigned int amsdu, ampdu; res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (res <= 0) return res; res = sscanf(buf, "%u %u", &amsdu, &ampdu); if (res != 2) return -EINVAL; mutex_lock(&ar->conf_mutex); res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu); if (res) goto out; res = count; ar->htt.max_num_amsdu = amsdu; ar->htt.max_num_ampdu = ampdu; out: mutex_unlock(&ar->conf_mutex); return res; } static const struct file_operations fops_htt_max_amsdu_ampdu = { .read = ath10k_read_htt_max_amsdu_ampdu, .write = ath10k_write_htt_max_amsdu_ampdu, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_fw_dbglog(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len; char buf[96]; len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n", ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_fw_dbglog(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; char buf[96] = {0}; unsigned int log_level; u64 mask; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret <= 0) return ret; ret = sscanf(buf, "%llx %u", &mask, &log_level); if (!ret) return -EINVAL; if (ret == 1) /* default if user did not specify */ log_level = ATH10K_DBGLOG_LEVEL_WARN; mutex_lock(&ar->conf_mutex); ar->debug.fw_dbglog_mask = mask; ar->debug.fw_dbglog_level = log_level; if (ar->state == ATH10K_STATE_ON) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level); if (ret) { ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", ret); goto exit; } } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } /* TODO: Would be nice to always support ethtool stats, would need to * move the stats storage out of ath10k_debug, or always have ath10k_debug * struct available..
*/ /* This generally corresponds to the debugfs fw_stats file */ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = { "tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic", "d_noise_floor", "d_cycle_count", "d_phy_error", "d_rts_bad", "d_rts_good", "d_tx_power", /* in .5 dbM I think */ "d_rx_crc_err", /* fcs_bad */ "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */ "d_no_beacon", "d_tx_mpdus_queued", "d_tx_msdu_queued", "d_tx_msdu_dropped", "d_local_enqued", "d_local_freed", "d_tx_ppdu_hw_queued", "d_tx_ppdu_reaped", "d_tx_fifo_underrun", "d_tx_ppdu_abort", "d_tx_mpdu_requeued", "d_tx_excessive_retries", "d_tx_hw_rate", "d_tx_dropped_sw_retries", "d_tx_illegal_rate", "d_tx_continuous_xretries", "d_tx_timeout", "d_tx_mpdu_txop_limit", "d_pdev_resets", "d_rx_mid_ppdu_route_change", "d_rx_status", "d_rx_extra_frags_ring0", "d_rx_extra_frags_ring1", "d_rx_extra_frags_ring2", "d_rx_extra_frags_ring3", "d_rx_msdu_htt", "d_rx_mpdu_htt", "d_rx_msdu_stack", "d_rx_mpdu_stack", "d_rx_phy_err", "d_rx_phy_err_drops", "d_rx_mpdu_errors", /* FCS, MIC, ENC */ "d_fw_crash_count", "d_fw_warm_reset_count", "d_fw_cold_reset_count", }; #define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats) void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) memcpy(data, ath10k_gstrings_stats, sizeof(ath10k_gstrings_stats)); } int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset == ETH_SS_STATS) return ATH10K_SSTATS_LEN; return 0; } void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct ath10k *ar = hw->priv; static const struct ath10k_fw_stats_pdev zero_stats = {}; const struct ath10k_fw_stats_pdev *pdev_stats; int i = 0, ret; mutex_lock(&ar->conf_mutex); if (ar->state == ATH10K_STATE_ON) { ret = ath10k_debug_fw_stats_request(ar); if (ret) { /* just print a warning and try to use older results */ ath10k_warn(ar, "failed to get fw stats for ethtool: %d\n", ret); } } pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs, struct ath10k_fw_stats_pdev, list); if (!pdev_stats) { /* no results available so just return zeroes */ pdev_stats = &zero_stats; } spin_lock_bh(&ar->data_lock); data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */ data[i++] = 0; /* tx bytes */ data[i++] = pdev_stats->htt_mpdus; data[i++] = 0; /* rx bytes */ data[i++] = pdev_stats->ch_noise_floor; data[i++] = pdev_stats->cycle_count; data[i++] = pdev_stats->phy_err_count; data[i++] = pdev_stats->rts_bad; data[i++] = pdev_stats->rts_good; data[i++] = pdev_stats->chan_tx_power; data[i++] = pdev_stats->fcs_bad; data[i++] = ar->stats.rx_crc_err_drop; data[i++] = pdev_stats->no_beacons; data[i++] = pdev_stats->mpdu_enqued; data[i++] = pdev_stats->msdu_enqued; data[i++] = pdev_stats->wmm_drop; data[i++] = pdev_stats->local_enqued; data[i++] = pdev_stats->local_freed; data[i++] = pdev_stats->hw_queued; data[i++] = pdev_stats->hw_reaped; data[i++] = pdev_stats->underrun; data[i++] = pdev_stats->tx_abort; data[i++] = pdev_stats->mpdus_requeued; data[i++] = pdev_stats->tx_ko; data[i++] = pdev_stats->data_rc; data[i++] = pdev_stats->sw_retry_failure; data[i++] = pdev_stats->illgl_rate_phy_err; data[i++] = pdev_stats->pdev_cont_xretry; data[i++] = pdev_stats->pdev_tx_timeout; data[i++] = pdev_stats->txop_ovf; data[i++] = pdev_stats->pdev_resets; data[i++] = pdev_stats->mid_ppdu_route_change; data[i++] = 
pdev_stats->status_rcvd; data[i++] = pdev_stats->r0_frags; data[i++] = pdev_stats->r1_frags; data[i++] = pdev_stats->r2_frags; data[i++] = pdev_stats->r3_frags; data[i++] = pdev_stats->htt_msdus; data[i++] = pdev_stats->htt_mpdus; data[i++] = pdev_stats->loc_msdus; data[i++] = pdev_stats->loc_mpdus; data[i++] = pdev_stats->phy_errs; data[i++] = pdev_stats->phy_err_drop; data[i++] = pdev_stats->mpdu_errs; data[i++] = ar->stats.fw_crash_counter; data[i++] = ar->stats.fw_warm_reset_counter; data[i++] = ar->stats.fw_cold_reset_counter; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); WARN_ON(i != ATH10K_SSTATS_LEN); } static const struct file_operations fops_fw_dbglog = { .read = ath10k_read_fw_dbglog, .write = ath10k_write_fw_dbglog, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath10k_debug_cal_data_fetch(struct ath10k *ar) { u32 hi_addr; __le32 addr; int ret; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) return -EINVAL; if (ar->hw_params.cal_data_len == 0) return -EOPNOTSUPP; hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); if (ret) { ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); return ret; } ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data, ar->hw_params.cal_data_len); if (ret) { ath10k_warn(ar, "failed to read calibration data: %d\n", ret); return ret; } return 0; } static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; mutex_lock(&ar->conf_mutex); if (ar->state == ATH10K_STATE_ON || ar->state == ATH10K_STATE_UTF) { ath10k_debug_cal_data_fetch(ar); } file->private_data = ar; mutex_unlock(&ar->conf_mutex); return 0; } static ssize_t ath10k_debug_cal_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; mutex_lock(&ar->conf_mutex); count = simple_read_from_buffer(user_buf, count, ppos, ar->debug.cal_data, ar->hw_params.cal_data_len); mutex_unlock(&ar->conf_mutex); return count; } static ssize_t ath10k_write_ani_enable(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; u8 enable; if (kstrtou8_from_user(user_buf, count, 0, &enable)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->ani_enabled == enable) { ret = count; goto exit; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable, enable); if (ret) { ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret); goto exit; } ar->ani_enabled = enable; ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len; char buf[32]; len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_ani_enable = { .read = ath10k_read_ani_enable, .write = ath10k_write_ani_enable, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static const struct file_operations fops_cal_data = { .open = ath10k_debug_cal_data_open, .read = ath10k_debug_cal_data_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_read_nf_cal_period(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct 
ath10k *ar = file->private_data; size_t len; char buf[32]; len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_write_nf_cal_period(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long period; int ret; ret = kstrtoul_from_user(user_buf, count, 0, &period); if (ret) return ret; if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX) return -EINVAL; /* there's no way to switch back to the firmware default */ if (period == 0) return -EINVAL; mutex_lock(&ar->conf_mutex); ar->debug.nf_cal_period = period; if (ar->state != ATH10K_STATE_ON) { /* firmware is not running, nothing else to do */ ret = count; goto exit; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); if (ret) { ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n", ret); goto exit; } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_nf_cal_period = { .read = ath10k_read_nf_cal_period, .write = ath10k_write_nf_cal_period, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024) static int ath10k_debug_tpc_stats_request(struct ath10k *ar) { int ret; unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->debug.tpc_complete); ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM); if (ret) { ath10k_warn(ar, "failed to request tpc config: %d\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, 1 * HZ); if (time_left == 0) return -ETIMEDOUT; return 0; } void ath10k_debug_tpc_stats_process(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats) { spin_lock_bh(&ar->data_lock); kfree(ar->debug.tpc_stats); ar->debug.tpc_stats = tpc_stats; complete(&ar->debug.tpc_complete); spin_unlock_bh(&ar->data_lock); } void ath10k_debug_tpc_stats_final_process(struct ath10k *ar, struct ath10k_tpc_stats_final *tpc_stats) { spin_lock_bh(&ar->data_lock); kfree(ar->debug.tpc_stats_final); ar->debug.tpc_stats_final = tpc_stats; complete(&ar->debug.tpc_complete); spin_unlock_bh(&ar->data_lock); } static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, unsigned int j, char *buf, size_t *len) { int i; size_t buf_len; static const char table_str[][5] = { "CDD", "STBC", "TXBF" }; static const char pream_str[][6] = { "CCK", "OFDM", "HT20", "HT40", "VHT20", "VHT40", "VHT80", "HTCUP" }; buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; *len += scnprintf(buf + *len, buf_len - *len, "********************************\n"); *len += scnprintf(buf + *len, buf_len - *len, "******************* %s POWER TABLE ****************\n", table_str[j]); *len += scnprintf(buf + *len, buf_len - *len, "********************************\n"); *len += scnprintf(buf + *len, buf_len - *len, "No. 
Preamble Rate_code "); for (i = 0; i < tpc_stats->num_tx_chain; i++) *len += scnprintf(buf + *len, buf_len - *len, "tpc_value%d ", i); *len += scnprintf(buf + *len, buf_len - *len, "\n"); for (i = 0; i < tpc_stats->rate_max; i++) { *len += scnprintf(buf + *len, buf_len - *len, "%8d %s 0x%2x %s\n", i, pream_str[tpc_stats->tpc_table[j].pream_idx[i]], tpc_stats->tpc_table[j].rate_code[i], tpc_stats->tpc_table[j].tpc_value[i]); } *len += scnprintf(buf + *len, buf_len - *len, "***********************************\n"); } static void ath10k_tpc_stats_fill(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats, char *buf) { int j; size_t len, buf_len; len = 0; buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; spin_lock_bh(&ar->data_lock); if (!tpc_stats) { ath10k_warn(ar, "failed to get tpc stats\n"); goto unlock; } len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "*************************************\n"); len += scnprintf(buf + len, buf_len - len, "TPC config for channel %4d mode %d\n", tpc_stats->chan_freq, tpc_stats->phy_mode); len += scnprintf(buf + len, buf_len - len, "*************************************\n"); len += scnprintf(buf + len, buf_len - len, "CTL = 0x%2x Reg. Domain = %2d\n", tpc_stats->ctl, tpc_stats->reg_domain); len += scnprintf(buf + len, buf_len - len, "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n", tpc_stats->twice_antenna_gain, tpc_stats->twice_antenna_reduction); len += scnprintf(buf + len, buf_len - len, "Power Limit = %2d Reg. Max Power = %2d\n", tpc_stats->power_limit, tpc_stats->twice_max_rd_power / 2); len += scnprintf(buf + len, buf_len - len, "Num tx chains = %2d Num supported rates = %2d\n", tpc_stats->num_tx_chain, tpc_stats->rate_max); for (j = 0; j < WMI_TPC_FLAG; j++) { switch (j) { case WMI_TPC_TABLE_TYPE_CDD: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { len += scnprintf(buf + len, buf_len - len, "CDD not supported\n"); break; } ath10k_tpc_stats_print(tpc_stats, j, buf, &len); break; case WMI_TPC_TABLE_TYPE_STBC: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { len += scnprintf(buf + len, buf_len - len, "STBC not supported\n"); break; } ath10k_tpc_stats_print(tpc_stats, j, buf, &len); break; case WMI_TPC_TABLE_TYPE_TXBF: if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) { len += scnprintf(buf + len, buf_len - len, "TXBF not supported\n***************************\n"); break; } ath10k_tpc_stats_print(tpc_stats, j, buf, &len); break; default: len += scnprintf(buf + len, buf_len - len, "Invalid Type\n"); break; } } unlock: spin_unlock_bh(&ar->data_lock); if (len >= buf_len) buf[len - 1] = 0; else buf[len] = 0; } static int ath10k_tpc_stats_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } ret = ath10k_debug_tpc_stats_request(ar); if (ret) { ath10k_warn(ar, "failed to request tpc config stats: %d\n", ret); goto err_free; } ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_tpc_stats_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = 
file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_tpc_stats = { .open = ath10k_tpc_stats_open, .release = ath10k_tpc_stats_release, .read = ath10k_tpc_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; int ath10k_debug_start(struct ath10k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); ret = ath10k_debug_htt_stats_req(ar); if (ret) /* continue normally anyway, this isn't serious */ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n", ret); if (ar->debug.fw_dbglog_mask) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask, ATH10K_DBGLOG_LEVEL_WARN); if (ret) /* not serious */ ath10k_warn(ar, "failed to enable dbglog during start: %d", ret); } if (ar->pktlog_filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, ar->pktlog_filter); if (ret) /* not serious */ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", ar->pktlog_filter, ret); } else { ret = ath10k_wmi_pdev_pktlog_disable(ar); if (ret) /* not serious */ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); } if (ar->debug.nf_cal_period && !test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); if (ret) /* not serious */ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n", ret); } return ret; } void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) ath10k_debug_cal_data_fetch(ar); /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid * warning from timer_delete(). */ if (ar->debug.htt_stats_mask != 0) cancel_delayed_work(&ar->debug.htt_stats_dwork); ath10k_wmi_pdev_pktlog_disable(ar); } static ssize_t ath10k_write_simulate_radar(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; struct ath10k_vif *arvif; /* Just check for the first vif alone, as all the vifs will be * sharing the same channel and if the channel is disabled, all the * vifs will share the same 'is_started' state. 
*/ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); if (!arvif->is_started) return -EINVAL; ieee80211_radar_detected(ar->hw, NULL); return count; } static const struct file_operations fops_simulate_radar = { .write = ath10k_write_simulate_radar, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; #define ATH10K_DFS_STAT(s, p) (\ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ ar->debug.dfs_stats.p)) #define ATH10K_DFS_POOL_STAT(s, p) (\ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ ar->debug.dfs_pool_stats.p)) static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { int retval = 0, len = 0; const int size = 8000; struct ath10k *ar = file->private_data; char *buf; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; if (!ar->dfs_detector) { len += scnprintf(buf + len, size - len, "DFS not enabled\n"); goto exit; } ar->debug.dfs_pool_stats = ar->dfs_detector->get_stats(ar->dfs_detector); len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); ATH10K_DFS_STAT("reported phy errors", phy_errors); ATH10K_DFS_STAT("pulse events reported", pulses_total); ATH10K_DFS_STAT("DFS pulses detected", pulses_detected); ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded); ATH10K_DFS_STAT("Radars detected", radar_detected); len += scnprintf(buf + len, size - len, "Global Pool statistics:\n"); ATH10K_DFS_POOL_STAT("Pool references", pool_reference); ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated); ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error); ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used); ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated); ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error); ATH10K_DFS_POOL_STAT("Seqs. 
in use", pseq_used); exit: if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_dfs_stats = { .read = ath10k_read_dfs_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_write_pktlog_filter(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 filter; int ret; if (kstrtouint_from_user(ubuf, count, 0, &filter)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ar->pktlog_filter = filter; ret = count; goto out; } if (filter == ar->pktlog_filter) { ret = count; goto out; } if (filter) { ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); if (ret) { ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", ar->pktlog_filter, ret); goto out; } } else { ret = ath10k_wmi_pdev_pktlog_disable(ar); if (ret) { ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); goto out; } } ar->pktlog_filter = filter; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x\n", ar->pktlog_filter); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_pktlog_filter = { .read = ath10k_read_pktlog_filter, .write = ath10k_write_pktlog_filter, .open = simple_open }; static ssize_t ath10k_write_quiet_period(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 period; if (kstrtouint_from_user(ubuf, count, 0, &period)) return -EINVAL; if (period < ATH10K_QUIET_PERIOD_MIN) { ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n", period); return -EINVAL; } mutex_lock(&ar->conf_mutex); ar->thermal.quiet_period = period; ath10k_thermal_set_throttling(ar); mutex_unlock(&ar->conf_mutex); return count; } static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", ar->thermal.quiet_period); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_quiet_period = { .read = ath10k_read_quiet_period, .write = ath10k_write_quiet_period, .open = simple_open }; static ssize_t ath10k_write_btcoex(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; ssize_t ret; bool val; u32 pdev_param; ret = kstrtobool_from_user(ubuf, count, &val); if (ret) return ret; if (!ar->coex_support) return -EOPNOTSUPP; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) { ret = count; goto exit; } pdev_param = ar->wmi.pdev_param->enable_btcoex; if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, ar->running_fw->fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); if (ret) { ath10k_warn(ar, "failed to enable btcoex: %zd\n", ret); ret = count; goto exit; } } else { ath10k_info(ar, "restarting firmware due to btcoex change"); 
ath10k_core_start_recovery(ar); } if (val) set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); else clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags)); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_btcoex = { .read = ath10k_read_btcoex, .write = ath10k_write_btcoex, .open = simple_open }; static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; u32 filter; int ret; if (kstrtouint_from_user(ubuf, count, 0, &filter)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ar->debug.enable_extd_tx_stats = filter; ret = count; goto out; } if (filter == ar->debug.enable_extd_tx_stats) { ret = count; goto out; } ar->debug.enable_extd_tx_stats = filter; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x\n", ar->debug.enable_extd_tx_stats); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_enable_extd_tx_stats = { .read = ath10k_read_enable_extd_tx_stats, .write = ath10k_write_enable_extd_tx_stats, .open = simple_open }; static ssize_t ath10k_write_peer_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; ssize_t ret; bool val; ret = kstrtobool_from_user(ubuf, count, &val); if (ret) return ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) { ret = count; goto exit; } if (val) set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); else clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags); ath10k_info(ar, "restarting firmware due to Peer stats change"); ath10k_core_start_recovery(ar); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char buf[32]; struct ath10k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags)); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_peer_stats = { .read = ath10k_read_peer_stats, .write = ath10k_write_peer_stats, .open = simple_open }; static ssize_t ath10k_debug_fw_checksums_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; size_t len = 0, buf_len = 4096; ssize_t ret_cnt; char *buf; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&ar->conf_mutex); len += scnprintf(buf + len, buf_len - len, "firmware-N.bin\t\t%08x\n", crc32_le(0, 
ar->normal_mode_fw.fw_file.firmware->data, ar->normal_mode_fw.fw_file.firmware->size)); len += scnprintf(buf + len, buf_len - len, "athwlan\t\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data, ar->normal_mode_fw.fw_file.firmware_len)); len += scnprintf(buf + len, buf_len - len, "otp\t\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.fw_file.otp_data, ar->normal_mode_fw.fw_file.otp_len)); len += scnprintf(buf + len, buf_len - len, "codeswap\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data, ar->normal_mode_fw.fw_file.codeswap_len)); len += scnprintf(buf + len, buf_len - len, "board-N.bin\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.board->data, ar->normal_mode_fw.board->size)); len += scnprintf(buf + len, buf_len - len, "board\t\t\t%08x\n", crc32_le(0, ar->normal_mode_fw.board_data, ar->normal_mode_fw.board_len)); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); mutex_unlock(&ar->conf_mutex); kfree(buf); return ret_cnt; } static const struct file_operations fops_fw_checksums = { .read = ath10k_debug_fw_checksums_read, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_sta_tid_stats_mask_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; char buf[32]; size_t len; len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->sta_tid_stats_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; ssize_t ret; u32 mask; ret = kstrtoint_from_user(user_buf, count, 0, &mask); if (ret) return ret; ar->sta_tid_stats_mask = mask; return count; } static const struct file_operations fops_sta_tid_stats_mask = { .read = ath10k_sta_tid_stats_mask_read, .write = ath10k_sta_tid_stats_mask_write, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath10k_debug_tpc_stats_final_request(struct ath10k *ar) { int ret; unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->debug.tpc_complete); ret = ath10k_wmi_pdev_get_tpc_table_cmdid(ar, WMI_TPC_CONFIG_PARAM); if (ret) { ath10k_warn(ar, "failed to request tpc table cmdid: %d\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->debug.tpc_complete, 1 * HZ); if (time_left == 0) return -ETIMEDOUT; return 0; } static int ath10k_tpc_stats_final_open(struct inode *inode, struct file *file) { struct ath10k *ar = inode->i_private; void *buf; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } ret = ath10k_debug_tpc_stats_final_request(ar); if (ret) { ath10k_warn(ar, "failed to request tpc stats final: %d\n", ret); goto err_free; } ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath10k_tpc_stats_final_release(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath10k_tpc_stats_final_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; unsigned int len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations 
fops_tpc_stats_final = { .open = ath10k_tpc_stats_final_open, .release = ath10k_tpc_stats_final_release, .read = ath10k_tpc_stats_final_read, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_write_warm_hw_reset(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; bool val; if (kstrtobool_from_user(user_buf, count, &val)) return -EFAULT; if (!val) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_ON) { ret = -ENETDOWN; goto exit; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset, WMI_RST_MODE_WARM_RESET); if (ret) { ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret); goto exit; } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_warm_hw_reset = { .write = ath10k_write_warm_hw_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath10k_peer_ps_state_disable(void *data, struct ieee80211_sta *sta) { struct ath10k *ar = data; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; spin_lock_bh(&ar->data_lock); arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; spin_unlock_bh(&ar->data_lock); } static ssize_t ath10k_write_ps_state_enable(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int ret; u32 param; u8 ps_state_enable; if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable)) return -EINVAL; if (ps_state_enable > 1) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->ps_state_enable == ps_state_enable) { ret = count; goto exit; } param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable; ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable); if (ret) { ath10k_warn(ar, "failed to enable ps_state_enable: %d\n", ret); goto exit; } ar->ps_state_enable = ps_state_enable; if (!ar->ps_state_enable) ieee80211_iterate_stations_atomic(ar->hw, ath10k_peer_ps_state_disable, ar); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath10k_read_ps_state_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; int len = 0; char buf[32]; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", ar->ps_state_enable); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_ps_state_enable = { .read = ath10k_read_ps_state_enable, .write = ath10k_write_ps_state_enable, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath10k_write_reset_htt_stats(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; unsigned long reset; int ret; ret = kstrtoul_from_user(user_buf, count, 0, &reset); if (ret) return ret; if (reset == 0 || reset > 0x1ffff) return -EINVAL; mutex_lock(&ar->conf_mutex); ar->debug.reset_htt_stats = reset; ret = ath10k_debug_htt_stats_req(ar); if (ret) goto out; ar->debug.reset_htt_stats = 0; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_reset_htt_stats = { .write = ath10k_write_reset_htt_stats, .owner = THIS_MODULE, .open = simple_open, .llseek = default_llseek, }; int ath10k_debug_create(struct ath10k *ar) { ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN); if (!ar->debug.cal_data) return -ENOMEM; 
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.peers); INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd); return 0; } void ath10k_debug_destroy(struct ath10k *ar) { vfree(ar->debug.cal_data); ar->debug.cal_data = NULL; ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); kfree(ar->debug.tpc_stats_final); } int ath10k_debug_register(struct ath10k *ar) { ar->debug.debugfs_phy = debugfs_create_dir("ath10k", ar->hw->wiphy->debugfsdir); if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) { if (IS_ERR(ar->debug.debugfs_phy)) return PTR_ERR(ar->debug.debugfs_phy); return -ENOMEM; } INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_dwork); init_completion(&ar->debug.tpc_complete); init_completion(&ar->debug.fw_stats_complete); debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar, &fops_fw_stats); debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar, &fops_fw_reset_stats); debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar, &fops_wmi_services); debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar, &fops_reg_addr); debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar, &fops_reg_value); debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar, &fops_mem_value); debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar, &fops_chip_id); debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar, &fops_htt_stats_mask); debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar, &fops_htt_max_amsdu_ampdu); debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, &fops_fw_dbglog); if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, ar->normal_mode_fw.fw_file.fw_features)) { debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, &fops_cal_data); debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, &fops_nf_cal_period); } debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ani_enable); if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, ar, &fops_simulate_radar); debugfs_create_bool("dfs_block_radar_events", 0200, ar->debug.debugfs_phy, &ar->dfs_block_radar_events); debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar, &fops_dfs_stats); } debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar, &fops_pktlog_filter); if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map)) debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar, &fops_quiet_period); debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar, &fops_tpc_stats); if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar, &fops_btcoex); if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar, &fops_peer_stats); debugfs_create_file("enable_extd_tx_stats", 0644, ar->debug.debugfs_phy, ar, &fops_enable_extd_tx_stats); } debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar, &fops_fw_checksums); if (IS_ENABLED(CONFIG_MAC80211_DEBUGFS)) debugfs_create_file("sta_tid_stats_mask", 0600, ar->debug.debugfs_phy, ar, &fops_sta_tid_stats_mask); if (test_bit(WMI_SERVICE_TPC_STATS_FINAL, ar->wmi.svc_map)) debugfs_create_file("tpc_stats_final", 0400, ar->debug.debugfs_phy, ar, 
&fops_tpc_stats_final); if (test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)) debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar, &fops_warm_hw_reset); debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ps_state_enable); debugfs_create_file("reset_htt_stats", 0200, ar->debug.debugfs_phy, ar, &fops_reset_htt_stats); return 0; } void ath10k_debug_unregister(struct ath10k *ar) { cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); } #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG void __ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (ath10k_debug_mask & mask) dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf); trace_ath10k_log_dbg(ar, mask, &vaf); va_end(args); } EXPORT_SYMBOL(__ath10k_dbg); void ath10k_dbg_dump(struct ath10k *ar, enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { char linebuf[256]; size_t linebuflen; const void *ptr; if (ath10k_debug_mask & mask) { if (msg) __ath10k_dbg(ar, mask, "%s\n", msg); for (ptr = buf; (ptr - buf) < len; ptr += 16) { linebuflen = 0; linebuflen += scnprintf(linebuf + linebuflen, sizeof(linebuf) - linebuflen, "%s%08x: ", (prefix ? prefix : ""), (unsigned int)(ptr - buf)); hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, linebuf + linebuflen, sizeof(linebuf) - linebuflen, true); dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf); } } /* tracing code doesn't like null strings :/ */ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "", buf, len); } EXPORT_SYMBOL(ath10k_dbg_dump); #endif /* CONFIG_ATH10K_DEBUG */
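/*
 * Illustrative userspace sketch (not part of the driver): the debugfs files
 * registered above are plain read/write files, so a small C program is enough
 * to exercise them. The mount point and phy name below are assumptions; the
 * directory actually created is "ath10k" under the wiphy's debugfs directory,
 * typically /sys/kernel/debug/ieee80211/phyN/ on a default debugfs mount.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ATH10K_DEBUGFS "/sys/kernel/debug/ieee80211/phy0/ath10k" /* assumed path */

static int write_str(const char *file, const char *val)
{
	char path[256];
	int fd, ret;

	snprintf(path, sizeof(path), "%s/%s", ATH10K_DEBUGFS, file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val)) == (ssize_t)strlen(val) ? 0 : -1;
	close(fd);
	return ret;
}

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd;

	/* enable a couple of HTT stats bits; the write handler above re-arms
	 * the delayed work that keeps requesting stats from the firmware */
	if (write_str("htt_stats_mask", "0x3\n"))
		perror("htt_stats_mask");

	/* a single write carries the dbglog mask and an optional level,
	 * matching the sscanf() parsing in ath10k_write_fw_dbglog() above */
	if (write_str("fw_dbglog", "0xffffffff 2\n"))
		perror("fw_dbglog");

	/* read back the current mask/level pair */
	fd = open(ATH10K_DEBUGFS "/fw_dbglog", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("fw_dbglog: %s", buf);
		}
		close(fd);
	}
	return 0;
}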
// SPDX-License-Identifier: GPL-2.0-or-later /**************************************************************** * * kaweth.c - driver for KL5KUSB101 based USB->Ethernet * * (c) 2000 Interlan Communications * (c) 2000 Stephane Alnet * (C) 2001 Brad Hards * (C) 2002 Oliver Neukum * * Original author: The Zapman <zapman@interlan.net> * Inspired by, and much credit goes to Michael Rothwell * <rothwell@interlan.net> for the test equipment, help, and patience * Based off of (and with thanks to) Petko Manolov's pegasus.c driver. * Also many thanks to Joel Silverman and Ed Surprenant at Kawasaki * for providing the firmware and driver resources. * ****************************************************************/ /* TODO: * Develop test procedures for USB net interfaces * Run test procedures * Fix bugs from previous two steps * Snoop other OSs for any tricks we're not doing * Reduce arbitrary timeouts * Smart multicast support * Temporary MAC change support * Tunable SOFs parameter - ioctl()?
* Ethernet stats collection * Code formatting improvements */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/types.h> #include <linux/ethtool.h> #include <linux/dma-mapping.h> #include <linux/wait.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #undef DEBUG #define KAWETH_MTU 1514 #define KAWETH_BUF_SIZE 1664 #define KAWETH_TX_TIMEOUT (5 * HZ) #define KAWETH_SCRATCH_SIZE 32 #define KAWETH_FIRMWARE_BUF_SIZE 4096 #define KAWETH_CONTROL_TIMEOUT (30000) #define KAWETH_STATUS_BROKEN 0x0000001 #define KAWETH_STATUS_CLOSING 0x0000002 #define KAWETH_STATUS_SUSPENDING 0x0000004 #define KAWETH_STATUS_BLOCKED (KAWETH_STATUS_CLOSING | KAWETH_STATUS_SUSPENDING) #define KAWETH_PACKET_FILTER_PROMISCUOUS 0x01 #define KAWETH_PACKET_FILTER_ALL_MULTICAST 0x02 #define KAWETH_PACKET_FILTER_DIRECTED 0x04 #define KAWETH_PACKET_FILTER_BROADCAST 0x08 #define KAWETH_PACKET_FILTER_MULTICAST 0x10 /* Table 7 */ #define KAWETH_COMMAND_GET_ETHERNET_DESC 0x00 #define KAWETH_COMMAND_MULTICAST_FILTERS 0x01 #define KAWETH_COMMAND_SET_PACKET_FILTER 0x02 #define KAWETH_COMMAND_STATISTICS 0x03 #define KAWETH_COMMAND_SET_TEMP_MAC 0x06 #define KAWETH_COMMAND_GET_TEMP_MAC 0x07 #define KAWETH_COMMAND_SET_URB_SIZE 0x08 #define KAWETH_COMMAND_SET_SOFS_WAIT 0x09 #define KAWETH_COMMAND_SCAN 0xFF #define KAWETH_SOFS_TO_WAIT 0x05 #define INTBUFFERSIZE 4 #define STATE_OFFSET 0 #define STATE_MASK 0x40 #define STATE_SHIFT 5 #define IS_BLOCKED(s) (s & KAWETH_STATUS_BLOCKED) MODULE_AUTHOR("Michael Zappe <zapman@interlan.net>, Stephane Alnet <stephane@u-picardie.fr>, Brad Hards <bhards@bigpond.net.au> and Oliver Neukum <oliver@neukum.org>"); MODULE_DESCRIPTION("KL5USB101 USB Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("kaweth/new_code.bin"); MODULE_FIRMWARE("kaweth/new_code_fix.bin"); MODULE_FIRMWARE("kaweth/trigger_code.bin"); MODULE_FIRMWARE("kaweth/trigger_code_fix.bin"); static const char driver_name[] = "kaweth"; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ); static void kaweth_disconnect(struct usb_interface *intf); static int kaweth_suspend(struct usb_interface *intf, pm_message_t message); static int kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ static const struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ { USB_DEVICE(0x0506, 0x11f8) }, /* 3Com 3C460 */ { USB_DEVICE(0x0557, 0x2002) }, /* ATEN USB Ethernet */ { USB_DEVICE(0x0557, 0x4000) }, /* D-Link DSB-650C */ { USB_DEVICE(0x0565, 0x0002) }, /* Peracom Enet */ { USB_DEVICE(0x0565, 0x0003) }, /* Optus@Home UEP1045A */ { USB_DEVICE(0x0565, 0x0005) }, /* Peracom Enet2 */ { USB_DEVICE(0x05e9, 0x0008) }, /* KLSI KL5KUSB101B */ { USB_DEVICE(0x05e9, 0x0009) }, /* KLSI KL5KUSB101B (Board change) */ { USB_DEVICE(0x066b, 0x2202) }, /* Linksys USB10T */ { USB_DEVICE(0x06e1, 0x0008) }, /* ADS USB-10BT */ { USB_DEVICE(0x06e1, 0x0009) }, /* ADS USB-10BT */ { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. 
*/ { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x085a, 0x0009) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x087d, 0x5704) }, /* Jaton USB Ethernet Device Adapter */ { USB_DEVICE(0x0951, 0x0008) }, /* Kingston Technology USB Ethernet Adapter */ { USB_DEVICE(0x095a, 0x3003) }, /* Portsmith Express Ethernet Adapter */ { USB_DEVICE(0x10bd, 0x1427) }, /* ASANTE USB To Ethernet Adapter */ { USB_DEVICE(0x1342, 0x0204) }, /* Mobility USB-Ethernet Adapter */ { USB_DEVICE(0x13d2, 0x0400) }, /* Shark Pocket Adapter */ { USB_DEVICE(0x1485, 0x0001) }, /* Silicom U2E */ { USB_DEVICE(0x1485, 0x0002) }, /* Psion Dacom Gold Port Ethernet */ { USB_DEVICE(0x1645, 0x0005) }, /* Entrega E45 */ { USB_DEVICE(0x1645, 0x0008) }, /* Entrega USB Ethernet Adapter */ { USB_DEVICE(0x1645, 0x8005) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x1668, 0x0323) }, /* Actiontec USB Ethernet */ { USB_DEVICE(0x2001, 0x4000) }, /* D-link DSB-650C */ {} /* Null terminator */ }; MODULE_DEVICE_TABLE (usb, usb_klsi_table); /**************************************************************** * kaweth_driver ****************************************************************/ static struct usb_driver kaweth_driver = { .name = driver_name, .probe = kaweth_probe, .disconnect = kaweth_disconnect, .suspend = kaweth_suspend, .resume = kaweth_resume, .id_table = usb_klsi_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; typedef __u8 eth_addr_t[6]; /**************************************************************** * usb_eth_dev ****************************************************************/ struct usb_eth_dev { char *name; __u16 vendor; __u16 device; void *pdata; }; /**************************************************************** * kaweth_ethernet_configuration * Refer Table 8 ****************************************************************/ struct kaweth_ethernet_configuration { __u8 size; __u8 reserved1; __u8 reserved2; eth_addr_t hw_addr; __u32 statistics_mask; __le16 segment_size; __u16 max_multicast_filters; __u8 reserved3; } __packed; /**************************************************************** * kaweth_device ****************************************************************/ struct kaweth_device { spinlock_t device_lock; __u32 status; int end; int suspend_lowmem_rx; int suspend_lowmem_ctrl; int linkstate; int opened; struct delayed_work lowmem_work; struct usb_device *dev; struct usb_interface *intf; struct net_device *net; wait_queue_head_t term_wait; struct urb *rx_urb; struct urb *tx_urb; struct urb *irq_urb; dma_addr_t intbufferhandle; __u8 *intbuffer; dma_addr_t rxbufferhandle; __u8 *rx_buf; struct sk_buff *tx_skb; __u8 *firmware_buf; __u8 scratch[KAWETH_SCRATCH_SIZE]; __u16 packet_filter_bitmap; struct kaweth_ethernet_configuration configuration; }; /**************************************************************** * kaweth_read_configuration ****************************************************************/ static int kaweth_read_configuration(struct kaweth_device *kaweth) { return usb_control_msg(kaweth->dev, usb_rcvctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_GET_ETHERNET_DESC, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &kaweth->configuration, sizeof(kaweth->configuration), KAWETH_CONTROL_TIMEOUT); } 
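/****************************************************************
 *     kaweth_read_statistics (illustrative sketch)
 *
 *     Every vendor command from Table 7 follows the same shape as
 *     kaweth_read_configuration() above: a vendor control transfer on
 *     endpoint 0 with the command code in bRequest.  As a minimal
 *     sketch of that pattern, a hypothetical helper for
 *     KAWETH_COMMAND_STATISTICS (0x03) is shown below.  It is not part
 *     of the driver; the zero wValue/wIndex and the size/layout of the
 *     returned block are assumptions -- the device reports whichever
 *     counters were selected via configuration.statistics_mask.
 ****************************************************************/
static int __maybe_unused kaweth_read_statistics(struct kaweth_device *kaweth,
						 void *stats, __u16 stats_len)
{
	/* stats must be a DMA-able (kmalloc'd) buffer, as with any
	 * usb_control_msg() data stage */
	return usb_control_msg(kaweth->dev,
			       usb_rcvctrlpipe(kaweth->dev, 0),
			       KAWETH_COMMAND_STATISTICS,	/* bRequest */
			       USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
			       0,				/* wValue (assumed) */
			       0,				/* wIndex (assumed) */
			       stats,
			       stats_len,
			       KAWETH_CONTROL_TIMEOUT);
}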
/**************************************************************** * kaweth_set_urb_size ****************************************************************/ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size) { netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_URB_SIZE, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, urb_size, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_set_sofs_wait ****************************************************************/ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait) { netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_SOFS_WAIT, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, sofs_wait, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_set_receive_filter ****************************************************************/ static int kaweth_set_receive_filter(struct kaweth_device *kaweth, __u16 receive_filter) { netdev_dbg(kaweth->net, "Set receive filter to %d\n", (unsigned)receive_filter); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, receive_filter, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_download_firmware ****************************************************************/ static int kaweth_download_firmware(struct kaweth_device *kaweth, const char *fwname, __u8 interrupt, __u8 type) { const struct firmware *fw; int data_len; int ret; ret = request_firmware(&fw, fwname, &kaweth->dev->dev); if (ret) { dev_err(&kaweth->intf->dev, "Firmware request failed\n"); return ret; } if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { dev_err(&kaweth->intf->dev, "Firmware too big: %zu\n", fw->size); release_firmware(fw); return -ENOSPC; } data_len = fw->size; memcpy(kaweth->firmware_buf, fw->data, fw->size); release_firmware(fw); kaweth->firmware_buf[2] = (data_len & 0xFF) - 7; kaweth->firmware_buf[3] = data_len >> 8; kaweth->firmware_buf[4] = type; kaweth->firmware_buf[5] = interrupt; netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3], kaweth->firmware_buf[2]); netdev_dbg(kaweth->net, "Downloading firmware at %p to kaweth device at %p\n", kaweth->firmware_buf, kaweth); netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 0, 0, kaweth->firmware_buf, data_len, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_trigger_firmware ****************************************************************/ static int kaweth_trigger_firmware(struct kaweth_device *kaweth, __u8 interrupt) { kaweth->firmware_buf[0] = 0xB6; kaweth->firmware_buf[1] = 0xC3; kaweth->firmware_buf[2] = 0x01; kaweth->firmware_buf[3] = 0x00; kaweth->firmware_buf[4] = 0x06; kaweth->firmware_buf[5] = interrupt; kaweth->firmware_buf[6] = 0x00; kaweth->firmware_buf[7] = 0x00; return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | 
USB_RECIP_DEVICE, 0, 0, (void *)kaweth->firmware_buf, 8, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_reset ****************************************************************/ static int kaweth_reset(struct kaweth_device *kaweth) { int result; result = usb_reset_configuration(kaweth->dev); mdelay(10); netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result); return result; } static void kaweth_usb_receive(struct urb *); static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t); /**************************************************************** int_callback *****************************************************************/ static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf) { int status; status = usb_submit_urb (kaweth->irq_urb, mf); if (unlikely(status == -ENOMEM)) { kaweth->suspend_lowmem_ctrl = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } else { kaweth->suspend_lowmem_ctrl = 0; } if (status) dev_err(&kaweth->intf->dev, "can't resubmit intr, %s-%s, status %d\n", kaweth->dev->bus->bus_name, kaweth->dev->devpath, status); } static void int_callback(struct urb *u) { struct kaweth_device *kaweth = u->context; int act_state; int status = u->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ goto resubmit; } /* we check the link state to report changes */ if (kaweth->linkstate != (act_state = ( kaweth->intbuffer[STATE_OFFSET] | STATE_MASK) >> STATE_SHIFT)) { if (act_state) netif_carrier_on(kaweth->net); else netif_carrier_off(kaweth->net); kaweth->linkstate = act_state; } resubmit: kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); } static void kaweth_resubmit_tl(struct work_struct *work) { struct kaweth_device *kaweth = container_of(work, struct kaweth_device, lowmem_work.work); if (IS_BLOCKED(kaweth->status)) return; if (kaweth->suspend_lowmem_rx) kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); if (kaweth->suspend_lowmem_ctrl) kaweth_resubmit_int_urb(kaweth, GFP_NOIO); } /**************************************************************** * kaweth_resubmit_rx_urb ****************************************************************/ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth, gfp_t mem_flags) { int result; usb_fill_bulk_urb(kaweth->rx_urb, kaweth->dev, usb_rcvbulkpipe(kaweth->dev, 1), kaweth->rx_buf, KAWETH_BUF_SIZE, kaweth_usb_receive, kaweth); kaweth->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; kaweth->rx_urb->transfer_dma = kaweth->rxbufferhandle; if((result = usb_submit_urb(kaweth->rx_urb, mem_flags))) { if (result == -ENOMEM) { kaweth->suspend_lowmem_rx = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } dev_err(&kaweth->intf->dev, "resubmitting rx_urb %d failed\n", result); } else { kaweth->suspend_lowmem_rx = 0; } return result; } static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth, bool may_sleep); /**************************************************************** * kaweth_usb_receive ****************************************************************/ static void kaweth_usb_receive(struct urb *urb) { struct device *dev = &urb->dev->dev; struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; __u16 pkt_len = le16_to_cpup((__le16 *)kaweth->rx_buf); struct sk_buff *skb; if 
(unlikely(status == -EPIPE)) { net->stats.rx_errors++; kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -EPIPE.\n"); return; } if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { /* we are killed - set a flag and wake the disconnect handler */ kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n"); return; } if (unlikely(status == -EPROTO || status == -ETIME || status == -EILSEQ)) { net->stats.rx_errors++; dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n"); return; } if (unlikely(status == -EOVERFLOW)) { net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, "%s RX status: %d count: %d packet_len: %d\n", net->name, status, count, (int)pkt_len); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(kaweth->net && (count > 2)) { if(pkt_len > (count - 2)) { dev_err(&kaweth->intf->dev, "Packet length too long for USB frame (pkt_len: %x, count: %x)\n", pkt_len, count); dev_err(&kaweth->intf->dev, "Packet len & 2047: %x\n", pkt_len & 2047); dev_err(&kaweth->intf->dev, "Count 2: %x\n", count2); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(!(skb = dev_alloc_skb(pkt_len+2))) { kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, net); netif_rx(skb); net->stats.rx_packets++; net->stats.rx_bytes += pkt_len; } kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); } /**************************************************************** * kaweth_open ****************************************************************/ static int kaweth_open(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); int res; res = usb_autopm_get_interface(kaweth->intf); if (res) { dev_err(&kaweth->intf->dev, "Interface cannot be resumed.\n"); return -EIO; } res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL); if (res) goto err_out; usb_fill_int_urb( kaweth->irq_urb, kaweth->dev, usb_rcvintpipe(kaweth->dev, 3), kaweth->intbuffer, INTBUFFERSIZE, int_callback, kaweth, 250); /* overriding the descriptor */ kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle; kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL); if (res) { usb_kill_urb(kaweth->rx_urb); goto err_out; } kaweth->opened = 1; netif_start_queue(net); kaweth_async_set_rx_mode(kaweth, true); return 0; err_out: usb_autopm_put_interface(kaweth->intf); return -EIO; } /**************************************************************** * kaweth_kill_urbs ****************************************************************/ static void kaweth_kill_urbs(struct kaweth_device *kaweth) { usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); usb_kill_urb(kaweth->tx_urb); cancel_delayed_work_sync(&kaweth->lowmem_work); /* a scheduled work may have resubmitted, we hit them again */ usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); } /**************************************************************** * kaweth_close ****************************************************************/ static int kaweth_close(struct net_device *net) { struct kaweth_device *kaweth 
= netdev_priv(net); netif_stop_queue(net); kaweth->opened = 0; kaweth->status |= KAWETH_STATUS_CLOSING; kaweth_kill_urbs(kaweth); kaweth->status &= ~KAWETH_STATUS_CLOSING; usb_autopm_put_interface(kaweth->intf); return 0; } static u32 kaweth_get_link(struct net_device *dev) { struct kaweth_device *kaweth = netdev_priv(dev); return kaweth->linkstate; } static const struct ethtool_ops ops = { .get_link = kaweth_get_link }; /**************************************************************** * kaweth_usb_transmit_complete ****************************************************************/ static void kaweth_usb_transmit_complete(struct urb *urb) { struct kaweth_device *kaweth = urb->context; struct sk_buff *skb = kaweth->tx_skb; int status = urb->status; if (unlikely(status != 0)) if (status != -ENOENT) dev_dbg(&urb->dev->dev, "%s: TX status %d.\n", kaweth->net->name, status); netif_wake_queue(kaweth->net); dev_kfree_skb_irq(skb); } /**************************************************************** * kaweth_start_xmit ****************************************************************/ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); __le16 *private_header; int res; spin_lock_irq(&kaweth->device_lock); kaweth_async_set_rx_mode(kaweth, false); netif_stop_queue(net); if (IS_BLOCKED(kaweth->status)) { goto skip; } /* We now decide whether we can put our special header into the sk_buff */ if (skb_cow_head(skb, 2)) { net->stats.tx_errors++; netif_start_queue(net); spin_unlock_irq(&kaweth->device_lock); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } private_header = __skb_push(skb, 2); *private_header = cpu_to_le16(skb->len-2); kaweth->tx_skb = skb; usb_fill_bulk_urb(kaweth->tx_urb, kaweth->dev, usb_sndbulkpipe(kaweth->dev, 2), private_header, skb->len, kaweth_usb_transmit_complete, kaweth); kaweth->end = 0; if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC))) { dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res); skip: net->stats.tx_errors++; netif_start_queue(net); dev_kfree_skb_irq(skb); } else { net->stats.tx_packets++; net->stats.tx_bytes += skb->len; } spin_unlock_irq(&kaweth->device_lock); return NETDEV_TX_OK; } /**************************************************************** * kaweth_set_rx_mode ****************************************************************/ static void kaweth_set_rx_mode(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); __u16 packet_filter_bitmap = KAWETH_PACKET_FILTER_DIRECTED | KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST; netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap); netif_stop_queue(net); if (net->flags & IFF_PROMISC) { packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS; } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) { packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST; } kaweth->packet_filter_bitmap = packet_filter_bitmap; netif_wake_queue(net); } /**************************************************************** * kaweth_async_set_rx_mode ****************************************************************/ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth, bool may_sleep) { int ret; __u16 packet_filter_bitmap = kaweth->packet_filter_bitmap; kaweth->packet_filter_bitmap = 0; if (packet_filter_bitmap == 0) return; if (!may_sleep) return; ret = usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | 
USB_RECIP_DEVICE, packet_filter_bitmap, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); if (ret < 0) dev_err(&kaweth->intf->dev, "Failed to set Rx mode: %d\n", ret); else netdev_dbg(kaweth->net, "Set Rx mode to %d\n", packet_filter_bitmap); } /**************************************************************** * kaweth_tx_timeout ****************************************************************/ static void kaweth_tx_timeout(struct net_device *net, unsigned int txqueue) { struct kaweth_device *kaweth = netdev_priv(net); dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name); net->stats.tx_errors++; netif_trans_update(net); usb_unlink_urb(kaweth->tx_urb); } /**************************************************************** * kaweth_suspend ****************************************************************/ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message) { struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status |= KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); kaweth_kill_urbs(kaweth); return 0; } /**************************************************************** * kaweth_resume ****************************************************************/ static int kaweth_resume(struct usb_interface *intf) { struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status &= ~KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); if (!kaweth->opened) return 0; kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); kaweth_resubmit_int_urb(kaweth, GFP_NOIO); return 0; } /**************************************************************** * kaweth_probe ****************************************************************/ static const struct net_device_ops kaweth_netdev_ops = { .ndo_open = kaweth_open, .ndo_stop = kaweth_close, .ndo_start_xmit = kaweth_start_xmit, .ndo_tx_timeout = kaweth_tx_timeout, .ndo_set_rx_mode = kaweth_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ) { struct device *dev = &intf->dev; struct usb_device *udev = interface_to_usbdev(intf); struct kaweth_device *kaweth; struct net_device *netdev; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int result = 0; int rv = -EIO; dev_dbg(dev, "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", udev->devnum, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), le16_to_cpu(udev->descriptor.bcdDevice)); dev_dbg(dev, "Device at %p\n", udev); dev_dbg(dev, "Descriptor length: %x type: %x\n", (int)udev->descriptor.bLength, (int)udev->descriptor.bDescriptorType); netdev = alloc_etherdev(sizeof(*kaweth)); if (!netdev) return -ENOMEM; kaweth = netdev_priv(netdev); kaweth->dev = udev; kaweth->net = netdev; kaweth->intf = intf; spin_lock_init(&kaweth->device_lock); init_waitqueue_head(&kaweth->term_wait); dev_dbg(dev, "Resetting.\n"); kaweth_reset(kaweth); /* * If high byte of bcdDevice is nonzero, firmware is already * downloaded. Don't try to do it again, or we'll hang the device. 
*/ if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) { dev_info(dev, "Firmware present in device.\n"); } else { /* Download the firmware */ dev_info(dev, "Downloading firmware...\n"); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); if (!kaweth->firmware_buf) { rv = -ENOMEM; goto err_free_netdev; } if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code.bin", 100, 2)) < 0) { dev_err(dev, "Error downloading firmware (%d)\n", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code_fix.bin", 100, 3)) < 0) { dev_err(dev, "Error downloading firmware fix (%d)\n", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/trigger_code.bin", 126, 2)) < 0) { dev_err(dev, "Error downloading trigger code (%d)\n", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/trigger_code_fix.bin", 126, 3)) < 0) { dev_err(dev, "Error downloading trigger code fix (%d)\n", result); goto err_fw; } if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) { dev_err(dev, "Error triggering firmware (%d)\n", result); goto err_fw; } /* Device will now disappear for a moment... */ dev_info(dev, "Firmware loaded. I'll be back...\n"); err_fw: free_page((unsigned long)kaweth->firmware_buf); free_netdev(netdev); return -EIO; } result = kaweth_read_configuration(kaweth); if(result < 0) { dev_err(dev, "Error reading configuration (%d), no net device created\n", result); goto err_free_netdev; } dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); if(!memcmp(&kaweth->configuration.hw_addr, &bcast_addr, sizeof(bcast_addr))) { dev_err(dev, "Firmware not functioning properly, no net device created\n"); goto err_free_netdev; } if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) { dev_dbg(dev, "Error setting URB size\n"); goto err_free_netdev; } if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) { dev_err(dev, "Error setting SOFS wait\n"); goto err_free_netdev; } result = kaweth_set_receive_filter(kaweth, KAWETH_PACKET_FILTER_DIRECTED | KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST); if(result < 0) { dev_err(dev, "Error setting receive filter\n"); goto err_free_netdev; } dev_dbg(dev, "Initializing net device.\n"); kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->tx_urb) goto err_free_netdev; kaweth->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->rx_urb) goto err_only_tx; kaweth->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->irq_urb) goto err_tx_and_rx; kaweth->intbuffer = usb_alloc_coherent( kaweth->dev, INTBUFFERSIZE, GFP_KERNEL, &kaweth->intbufferhandle); if (!kaweth->intbuffer) goto err_tx_and_rx_and_irq; kaweth->rx_buf = usb_alloc_coherent( kaweth->dev, KAWETH_BUF_SIZE, GFP_KERNEL, &kaweth->rxbufferhandle); if (!kaweth->rx_buf) goto err_all_but_rxbuf; memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr)); eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr); netdev->netdev_ops = &kaweth_netdev_ops; netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size); netdev->ethtool_ops = &ops; /* kaweth is zeroed as part of alloc_netdev */ INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); usb_set_intfdata(intf, kaweth); 
SET_NETDEV_DEV(netdev, dev); if (register_netdev(netdev) != 0) { dev_err(dev, "Error registering netdev.\n"); goto err_intfdata; } dev_info(dev, "kaweth interface created at %s\n", kaweth->net->name); return 0; err_intfdata: usb_set_intfdata(intf, NULL); usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); err_all_but_rxbuf: usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); err_tx_and_rx_and_irq: usb_free_urb(kaweth->irq_urb); err_tx_and_rx: usb_free_urb(kaweth->rx_urb); err_only_tx: usb_free_urb(kaweth->tx_urb); err_free_netdev: free_netdev(netdev); return rv; } /**************************************************************** * kaweth_disconnect ****************************************************************/ static void kaweth_disconnect(struct usb_interface *intf) { struct kaweth_device *kaweth = usb_get_intfdata(intf); struct net_device *netdev; usb_set_intfdata(intf, NULL); if (!kaweth) { dev_warn(&intf->dev, "unregistering non-existent device\n"); return; } netdev = kaweth->net; netdev_dbg(kaweth->net, "Unregistering net device\n"); unregister_netdev(netdev); usb_free_urb(kaweth->rx_urb); usb_free_urb(kaweth->tx_urb); usb_free_urb(kaweth->irq_urb); usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); free_netdev(netdev); } module_usb_driver(kaweth_driver); |
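The transmit and receive paths above share one framing convention: each bulk transfer carries a 2-byte little-endian length word followed by the Ethernet frame (kaweth_start_xmit pushes the header with __skb_push(skb, 2) and stores cpu_to_le16(skb->len - 2); the receive completion copies the payload from rx_buf + 2 and rejects pkt_len > count - 2). A minimal stand-alone sketch of that framing, using hypothetical helper names that are not part of the driver:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Pack an Ethernet frame the way the kaweth TX path does: a 2-byte
 * little-endian length word followed by the frame itself. */
static size_t kaweth_frame_pack(uint8_t *out, const uint8_t *frame, uint16_t len)
{
	out[0] = len & 0xff;
	out[1] = len >> 8;
	memcpy(out + 2, frame, len);
	return (size_t)len + 2;		/* bytes to hand to the bulk URB */
}

/* Unpack a received buffer with the same sanity check as the RX path. */
static int kaweth_frame_unpack(const uint8_t *in, size_t count,
			       const uint8_t **frame, uint16_t *len)
{
	if (count < 3)
		return -1;
	*len = in[0] | (in[1] << 8);
	if (*len > count - 2)		/* longer than the USB transfer: drop */
		return -1;
	*frame = in + 2;
	return 0;
}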
21 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 | // SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Keytouch devices not fully compliant with HID standard * * Copyright (c) 2011 Jiri Kosina */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" /* Replace the broken report descriptor of this device with rather * a default one */ static const __u8 keytouch_fixed_rdesc[] = { 0x05, 0x01, 0x09, 0x06, 0xa1, 0x01, 0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15, 0x00, 0x25, 0x01, 0x75, 0x01, 0x95, 0x08, 0x81, 0x02, 0x95, 0x01, 0x75, 0x08, 0x81, 0x01, 0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91, 0x02, 0x95, 0x05, 0x75, 0x01, 0x91, 0x01, 0x95, 0x06, 0x75, 0x08, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00, 0xc0 }; static const __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { hid_info(hdev, "fixing up Keytouch IEC report descriptor\n"); *rsize = sizeof(keytouch_fixed_rdesc); return keytouch_fixed_rdesc; } static const struct hid_device_id keytouch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, { } }; MODULE_DEVICE_TABLE(hid, keytouch_devices); static struct hid_driver keytouch_driver = { .name = "keytouch", .id_table = keytouch_devices, .report_fixup = keytouch_report_fixup, }; module_hid_driver(keytouch_driver); MODULE_DESCRIPTION("HID driver for Keytouch devices not fully compliant with HID standard"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jiri Kosina"); |
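For reference, the fixed report descriptor above is essentially the standard boot-keyboard layout: eight 1-bit modifier usages (0xE0-0xE7), a constant padding byte, a 3-bit LED output report, and a six-element key-code array. A rough sketch of the 8-byte input report that descriptor implies; the struct and field names are illustrative only, not something the driver defines:

#include <stdint.h>

/* Hypothetical view of the 8-byte keyboard input report described by
 * keytouch_fixed_rdesc. */
struct keytouch_input_report {
	uint8_t modifiers;	/* bit 0 = LeftCtrl ... bit 7 = RightGUI */
	uint8_t reserved;	/* constant padding byte */
	uint8_t keys[6];	/* HID usages of pressed keys, 0 = none */
} __attribute__((packed));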
// SPDX-License-Identifier: GPL-2.0+ /* * copyright (C) 1999/2000 by Henning Zabel <henning@uni-paderborn.de> */ /* * USB-Kernel Driver for the Mustek MDC800 Digital Camera * (c) 1999/2000 Henning Zabel <henning@uni-paderborn.de> * * * The driver brings the USB functions of the MDC800 to Linux. * To use the camera, the driver maps the camera's USB protocol onto * a kernel device node. * The driver uses a misc device node. Create it with: * mknod /dev/mustek c 180 32 * * The driver supports only one camera. * * Fix: mdc800 used sleep_on and slept with io_lock held. * Converted sleep_on to waitqueues with schedule_timeout and made io_lock * a semaphore from a spinlock. * by Oliver Neukum <oliver@neukum.name> * (02/12/2001) * * Identify version on module load. * (08/04/2001) gb * * version 0.7.5 * Fixed potential SMP races with spinlocks. * Thanks to Oliver Neukum <oliver@neukum.name> who * noticed the race conditions. * (30/10/2000) * * Fixed: Setting urb->dev before submitting urb. * by Greg KH <greg@kroah.com> * (13/10/2000) * * version 0.7.3 * bugfix: The mdc800->state field gets set to READY after the * disconnect function sets it to NOT_CONNECTED. This makes the * driver behave as if the camera were still connected and causes some * hangs. * * version 0.7.1 * MOD_INC and MOD_DEC are changed in usb_probe to prevent load/unload * problems when compiled as a module. * (04/04/2000) * * The mdc800 driver gets assigned the USB minors 32-47. The registration * was updated to use these values. * (26/03/2000) * * The init and exit module functions are updated. * (01/03/2000) * * version 0.7.0 * Rewrite of the driver: The driver now uses URBs. The old stuff * has been removed. * * version 0.6.0 * Rewrite of this driver: The emulation of the rs232 protocol * has been removed from the driver. A special executeCommand function * for this driver is included in gphoto. * The driver supports two kinds of communication with bulk endpoints. * Either with the dev->bus->ops->bulk... or with a callback function. * (09/11/1999) * * version 0.5.0: * First version that gets a version number. Most of the needed * functions work.
* (20/10/1999) */ #include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/usb.h> #include <linux/fs.h> /* * Version Information */ #define DRIVER_VERSION "v0.7.5 (30/10/2000)" #define DRIVER_AUTHOR "Henning Zabel <henning@uni-paderborn.de>" #define DRIVER_DESC "USB Driver for Mustek MDC800 Digital Camera" /* Vendor and Product Information */ #define MDC800_VENDOR_ID 0x055f #define MDC800_PRODUCT_ID 0xa800 /* Timeouts (msec) */ #define TO_DOWNLOAD_GET_READY 1500 #define TO_DOWNLOAD_GET_BUSY 1500 #define TO_WRITE_GET_READY 1000 #define TO_DEFAULT_COMMAND 5000 #define TO_READ_FROM_IRQ TO_DEFAULT_COMMAND #define TO_GET_READY TO_DEFAULT_COMMAND /* Minor Number of the device (create with mknod /dev/mustek c 180 32) */ #define MDC800_DEVICE_MINOR_BASE 32 /************************************************************************** Data and structs ***************************************************************************/ typedef enum { NOT_CONNECTED, READY, WORKING, DOWNLOAD } mdc800_state; /* Data for the driver */ struct mdc800_data { struct usb_device * dev; // Device Data mdc800_state state; unsigned int endpoint [4]; struct urb * irq_urb; wait_queue_head_t irq_wait; int irq_woken; char* irq_urb_buffer; int camera_busy; // is camera busy ? int camera_request_ready; // Status to synchronize with irq char camera_response [8]; // last Bytes send after busy struct urb * write_urb; char* write_urb_buffer; wait_queue_head_t write_wait; int written; struct urb * download_urb; char* download_urb_buffer; wait_queue_head_t download_wait; int downloaded; int download_left; // Bytes left to download ? /* Device Data */ char out [64]; // Answer Buffer int out_ptr; // Index to the first not readen byte int out_count; // Bytes in the buffer int open; // Camera device open ? 
struct mutex io_lock; // IO -lock char in [8]; // Command Input Buffer int in_count; int pic_index; // Cache for the Imagesize (-1 for nothing cached ) int pic_len; int minor; }; /* Specification of the Endpoints */ static struct usb_endpoint_descriptor mdc800_ed [4] = { { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x01, .bmAttributes = 0x02, .wMaxPacketSize = cpu_to_le16(8), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x82, .bmAttributes = 0x03, .wMaxPacketSize = cpu_to_le16(8), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x03, .bmAttributes = 0x02, .wMaxPacketSize = cpu_to_le16(64), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, { .bLength = 0, .bDescriptorType = 0, .bEndpointAddress = 0x84, .bmAttributes = 0x02, .wMaxPacketSize = cpu_to_le16(64), .bInterval = 0, .bRefresh = 0, .bSynchAddress = 0, }, }; /* The Variable used by the driver */ static struct mdc800_data* mdc800; /*************************************************************************** The USB Part of the driver ****************************************************************************/ static int mdc800_endpoint_equals (struct usb_endpoint_descriptor *a,struct usb_endpoint_descriptor *b) { return ( ( a->bEndpointAddress == b->bEndpointAddress ) && ( a->bmAttributes == b->bmAttributes ) && ( a->wMaxPacketSize == b->wMaxPacketSize ) ); } /* * Checks whether the camera responds busy */ static int mdc800_isBusy (char* ch) { int i=0; while (i<8) { if (ch [i] != (char)0x99) return 0; i++; } return 1; } /* * Checks whether the Camera is ready */ static int mdc800_isReady (char *ch) { int i=0; while (i<8) { if (ch [i] != (char)0xbb) return 0; i++; } return 1; } /* * USB IRQ Handler for InputLine */ static void mdc800_usb_irq (struct urb *urb) { int data_received=0, wake_up; unsigned char* b=urb->transfer_buffer; struct mdc800_data* mdc800=urb->context; struct device *dev = &mdc800->dev->dev; int status = urb->status; if (status >= 0) { if (mdc800_isBusy (b)) { if (!mdc800->camera_busy) { mdc800->camera_busy=1; dev_dbg(dev, "gets busy\n"); } } else { if (mdc800->camera_busy && mdc800_isReady (b)) { mdc800->camera_busy=0; dev_dbg(dev, "gets ready\n"); } } if (!(mdc800_isBusy (b) || mdc800_isReady (b))) { /* Store Data in camera_answer field */ dev_dbg(dev, "%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]); memcpy (mdc800->camera_response,b,8); data_received=1; } } wake_up= ( mdc800->camera_request_ready > 0 ) && ( ((mdc800->camera_request_ready == 1) && (!mdc800->camera_busy)) || ((mdc800->camera_request_ready == 2) && data_received) || ((mdc800->camera_request_ready == 3) && (mdc800->camera_busy)) || (status < 0) ); if (wake_up) { mdc800->camera_request_ready=0; mdc800->irq_woken=1; wake_up (&mdc800->irq_wait); } } /* * Waits a while until the irq responds that camera is ready * * mode : 0: Wait for camera gets ready * 1: Wait for receiving data * 2: Wait for camera gets busy * * msec: Time to wait */ static int mdc800_usb_waitForIRQ (int mode, int msec) { mdc800->camera_request_ready=1+mode; wait_event_timeout(mdc800->irq_wait, mdc800->irq_woken, msecs_to_jiffies(msec)); mdc800->irq_woken = 0; if (mdc800->camera_request_ready>0) { mdc800->camera_request_ready=0; dev_err(&mdc800->dev->dev, "timeout waiting for camera.\n"); return -1; } if (mdc800->state == NOT_CONNECTED) { printk(KERN_WARNING "mdc800: Camera gets disconnected " "during waiting for 
irq.\n"); mdc800->camera_request_ready=0; return -2; } return 0; } /* * The write_urb callback function */ static void mdc800_usb_write_notify (struct urb *urb) { struct mdc800_data* mdc800=urb->context; int status = urb->status; if (status != 0) dev_err(&mdc800->dev->dev, "writing command fails (status=%i)\n", status); else mdc800->state=READY; mdc800->written = 1; wake_up (&mdc800->write_wait); } /* * The download_urb callback function */ static void mdc800_usb_download_notify (struct urb *urb) { struct mdc800_data* mdc800=urb->context; int status = urb->status; if (status == 0) { /* Fill output buffer with these data */ memcpy (mdc800->out, urb->transfer_buffer, 64); mdc800->out_count=64; mdc800->out_ptr=0; mdc800->download_left-=64; if (mdc800->download_left == 0) { mdc800->state=READY; } } else { dev_err(&mdc800->dev->dev, "request bytes fails (status:%i)\n", status); } mdc800->downloaded = 1; wake_up (&mdc800->download_wait); } /*************************************************************************** Probing for the Camera ***************************************************************************/ static struct usb_driver mdc800_usb_driver; static const struct file_operations mdc800_device_ops; static struct usb_class_driver mdc800_class = { .name = "mdc800%d", .fops = &mdc800_device_ops, .minor_base = MDC800_DEVICE_MINOR_BASE, }; /* * Callback to search the Mustek MDC800 on the USB Bus */ static int mdc800_usb_probe (struct usb_interface *intf, const struct usb_device_id *id) { int i,j; struct usb_host_interface *intf_desc; struct usb_device *dev = interface_to_usbdev (intf); int irq_interval=0; int retval; dev_dbg(&intf->dev, "(%s) called.\n", __func__); if (mdc800->dev != NULL) { dev_warn(&intf->dev, "only one Mustek MDC800 is supported.\n"); return -ENODEV; } if (dev->descriptor.bNumConfigurations != 1) { dev_err(&intf->dev, "probe fails -> wrong Number of Configuration\n"); return -ENODEV; } intf_desc = intf->cur_altsetting; if ( ( intf_desc->desc.bInterfaceClass != 0xff ) || ( intf_desc->desc.bInterfaceSubClass != 0 ) || ( intf_desc->desc.bInterfaceProtocol != 0 ) || ( intf_desc->desc.bNumEndpoints != 4) ) { dev_err(&intf->dev, "probe fails -> wrong Interface\n"); return -ENODEV; } /* Check the Endpoints */ for (i=0; i<4; i++) { mdc800->endpoint[i]=-1; for (j=0; j<4; j++) { if (mdc800_endpoint_equals (&intf_desc->endpoint [j].desc,&mdc800_ed [i])) { mdc800->endpoint[i]=intf_desc->endpoint [j].desc.bEndpointAddress ; if (i==1) { irq_interval=intf_desc->endpoint [j].desc.bInterval; } } } if (mdc800->endpoint[i] == -1) { dev_err(&intf->dev, "probe fails -> Wrong Endpoints.\n"); return -ENODEV; } } dev_info(&intf->dev, "Found Mustek MDC800 on USB.\n"); mutex_lock(&mdc800->io_lock); retval = usb_register_dev(intf, &mdc800_class); if (retval) { dev_err(&intf->dev, "Not able to get a minor for this device.\n"); mutex_unlock(&mdc800->io_lock); return -ENODEV; } mdc800->dev=dev; mdc800->open=0; /* Setup URB Structs */ usb_fill_int_urb ( mdc800->irq_urb, mdc800->dev, usb_rcvintpipe (mdc800->dev,mdc800->endpoint [1]), mdc800->irq_urb_buffer, 8, mdc800_usb_irq, mdc800, irq_interval ); usb_fill_bulk_urb ( mdc800->write_urb, mdc800->dev, usb_sndbulkpipe (mdc800->dev, mdc800->endpoint[0]), mdc800->write_urb_buffer, 8, mdc800_usb_write_notify, mdc800 ); usb_fill_bulk_urb ( mdc800->download_urb, mdc800->dev, usb_rcvbulkpipe (mdc800->dev, mdc800->endpoint [3]), mdc800->download_urb_buffer, 64, mdc800_usb_download_notify, mdc800 ); mdc800->state=READY; mutex_unlock(&mdc800->io_lock); 
usb_set_intfdata(intf, mdc800); return 0; } /* * Disconnect USB device (maybe the MDC800) */ static void mdc800_usb_disconnect (struct usb_interface *intf) { struct mdc800_data* mdc800 = usb_get_intfdata(intf); dev_dbg(&intf->dev, "(%s) called\n", __func__); if (mdc800) { if (mdc800->state == NOT_CONNECTED) return; usb_deregister_dev(intf, &mdc800_class); /* must be under lock to make sure no URB is submitted after usb_kill_urb() */ mutex_lock(&mdc800->io_lock); mdc800->state=NOT_CONNECTED; usb_kill_urb(mdc800->irq_urb); usb_kill_urb(mdc800->write_urb); usb_kill_urb(mdc800->download_urb); mutex_unlock(&mdc800->io_lock); mdc800->dev = NULL; usb_set_intfdata(intf, NULL); } dev_info(&intf->dev, "Mustek MDC800 disconnected from USB.\n"); } /*************************************************************************** The Misc device Part (file_operations) ****************************************************************************/ /* * This Function calc the Answersize for a command. */ static int mdc800_getAnswerSize (char command) { switch ((unsigned char) command) { case 0x2a: case 0x49: case 0x51: case 0x0d: case 0x20: case 0x07: case 0x01: case 0x25: case 0x00: return 8; case 0x05: case 0x3e: return mdc800->pic_len; case 0x09: return 4096; default: return 0; } } /* * Init the device: (1) alloc mem (2) Increase MOD Count .. */ static int mdc800_device_open (struct inode* inode, struct file *file) { int retval=0; int errn=0; mutex_lock(&mdc800->io_lock); if (mdc800->state == NOT_CONNECTED) { errn=-EBUSY; goto error_out; } if (mdc800->open) { errn=-EBUSY; goto error_out; } mdc800->in_count=0; mdc800->out_count=0; mdc800->out_ptr=0; mdc800->pic_index=0; mdc800->pic_len=-1; mdc800->download_left=0; mdc800->camera_busy=0; mdc800->camera_request_ready=0; mdc800->irq_urb->dev = mdc800->dev; retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL); if (retval) { dev_err(&mdc800->dev->dev, "request USB irq fails (submit_retval=%i).\n", retval); errn = -EIO; goto error_out; } mdc800->open=1; dev_dbg(&mdc800->dev->dev, "Mustek MDC800 device opened.\n"); error_out: mutex_unlock(&mdc800->io_lock); return errn; } /* * Close the Camera and release Memory */ static int mdc800_device_release (struct inode* inode, struct file *file) { int retval=0; mutex_lock(&mdc800->io_lock); if (mdc800->open && (mdc800->state != NOT_CONNECTED)) { usb_kill_urb(mdc800->irq_urb); usb_kill_urb(mdc800->write_urb); usb_kill_urb(mdc800->download_urb); mdc800->open=0; } else { retval=-EIO; } mutex_unlock(&mdc800->io_lock); return retval; } /* * The Device read callback Function */ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t len, loff_t *pos) { size_t left=len, sts=len; /* single transfer size */ char __user *ptr = buf; int retval; mutex_lock(&mdc800->io_lock); if (mdc800->state == NOT_CONNECTED) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } if (mdc800->state == WORKING) { printk(KERN_WARNING "mdc800: Illegal State \"working\"" "reached during read ?!\n"); mutex_unlock(&mdc800->io_lock); return -EBUSY; } if (!mdc800->open) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } while (left) { if (signal_pending (current)) { mutex_unlock(&mdc800->io_lock); return -EINTR; } sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left; if (sts <= 0) { /* Too less Data in buffer */ if (mdc800->state == DOWNLOAD) { mdc800->out_count=0; mdc800->out_ptr=0; /* Download -> Request new bytes */ mdc800->download_urb->dev = mdc800->dev; retval = usb_submit_urb (mdc800->download_urb, 
GFP_KERNEL); if (retval) { dev_err(&mdc800->dev->dev, "Can't submit download urb " "(retval=%i)\n", retval); mutex_unlock(&mdc800->io_lock); return len-left; } wait_event_timeout(mdc800->download_wait, mdc800->downloaded, msecs_to_jiffies(TO_DOWNLOAD_GET_READY)); mdc800->downloaded = 0; if (mdc800->download_urb->status != 0) { dev_err(&mdc800->dev->dev, "request download-bytes fails " "(status=%i)\n", mdc800->download_urb->status); mutex_unlock(&mdc800->io_lock); return len-left; } } else { /* No more bytes -> that's an error*/ mutex_unlock(&mdc800->io_lock); return -EIO; } } else { /* Copy Bytes */ if (copy_to_user(ptr, &mdc800->out [mdc800->out_ptr], sts)) { mutex_unlock(&mdc800->io_lock); return -EFAULT; } ptr+=sts; left-=sts; mdc800->out_ptr+=sts; } } mutex_unlock(&mdc800->io_lock); return len-left; } /* * The Device write callback Function * If a 8Byte Command is received, it will be send to the camera. * After this the driver initiates the request for the answer or * just waits until the camera becomes ready. */ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos) { size_t i=0; int retval; mutex_lock(&mdc800->io_lock); if (mdc800->state != READY) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } if (!mdc800->open ) { mutex_unlock(&mdc800->io_lock); return -EBUSY; } while (i<len) { unsigned char c; if (signal_pending (current)) { mutex_unlock(&mdc800->io_lock); return -EINTR; } if(get_user(c, buf+i)) { mutex_unlock(&mdc800->io_lock); return -EFAULT; } /* check for command start */ if (c == 0x55) { mdc800->in_count=0; mdc800->out_count=0; mdc800->out_ptr=0; mdc800->download_left=0; } /* save command byte */ if (mdc800->in_count < 8) { mdc800->in[mdc800->in_count] = c; mdc800->in_count++; } else { mutex_unlock(&mdc800->io_lock); return -EIO; } /* Command Buffer full ? 
-> send it to camera */ if (mdc800->in_count == 8) { int answersize; if (mdc800_usb_waitForIRQ (0,TO_GET_READY)) { dev_err(&mdc800->dev->dev, "Camera didn't get ready.\n"); mutex_unlock(&mdc800->io_lock); return -EIO; } answersize=mdc800_getAnswerSize (mdc800->in[1]); mdc800->state=WORKING; memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8); mdc800->write_urb->dev = mdc800->dev; retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL); if (retval) { dev_err(&mdc800->dev->dev, "submitting write urb fails " "(retval=%i)\n", retval); mutex_unlock(&mdc800->io_lock); return -EIO; } wait_event_timeout(mdc800->write_wait, mdc800->written, msecs_to_jiffies(TO_WRITE_GET_READY)); mdc800->written = 0; if (mdc800->state == WORKING) { usb_kill_urb(mdc800->write_urb); mutex_unlock(&mdc800->io_lock); return -EIO; } switch ((unsigned char) mdc800->in[1]) { case 0x05: /* Download Image */ case 0x3e: /* Take shot in Fine Mode (WCam Mode) */ if (mdc800->pic_len < 0) { dev_err(&mdc800->dev->dev, "call 0x07 before " "0x05,0x3e\n"); mdc800->state=READY; mutex_unlock(&mdc800->io_lock); return -EIO; } mdc800->pic_len=-1; fallthrough; case 0x09: /* Download Thumbnail */ mdc800->download_left=answersize+64; mdc800->state=DOWNLOAD; mdc800_usb_waitForIRQ (0,TO_DOWNLOAD_GET_BUSY); break; default: if (answersize) { if (mdc800_usb_waitForIRQ (1,TO_READ_FROM_IRQ)) { dev_err(&mdc800->dev->dev, "requesting answer from irq fails\n"); mutex_unlock(&mdc800->io_lock); return -EIO; } /* Write dummy data, (this is ugly but part of the USB Protocol */ /* if you use endpoint 1 as bulk and not as irq) */ memcpy (mdc800->out, mdc800->camera_response,8); /* This is the interpreted answer */ memcpy (&mdc800->out[8], mdc800->camera_response,8); mdc800->out_ptr=0; mdc800->out_count=16; /* Cache the Imagesize, if command was getImageSize */ if (mdc800->in [1] == (char) 0x07) { mdc800->pic_len=(int) 65536*(unsigned char) mdc800->camera_response[0]+256*(unsigned char) mdc800->camera_response[1]+(unsigned char) mdc800->camera_response[2]; dev_dbg(&mdc800->dev->dev, "cached imagesize = %i\n", mdc800->pic_len); } } else { if (mdc800_usb_waitForIRQ (0,TO_DEFAULT_COMMAND)) { dev_err(&mdc800->dev->dev, "Command Timeout.\n"); mutex_unlock(&mdc800->io_lock); return -EIO; } } mdc800->state=READY; break; } } i++; } mutex_unlock(&mdc800->io_lock); return i; } /*************************************************************************** Init and Cleanup this driver (Structs and types) ****************************************************************************/ /* File Operations of this drivers */ static const struct file_operations mdc800_device_ops = { .owner = THIS_MODULE, .read = mdc800_device_read, .write = mdc800_device_write, .open = mdc800_device_open, .release = mdc800_device_release, .llseek = noop_llseek, }; static const struct usb_device_id mdc800_table[] = { { USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, mdc800_table); /* * USB Driver Struct for this device */ static struct usb_driver mdc800_usb_driver = { .name = "mdc800", .probe = mdc800_usb_probe, .disconnect = mdc800_usb_disconnect, .id_table = mdc800_table }; /************************************************************************ Init and Cleanup this driver (Main Functions) *************************************************************************/ static int __init usb_mdc800_init (void) { int retval = -ENODEV; /* Allocate Memory */ mdc800=kzalloc (sizeof (struct mdc800_data), GFP_KERNEL); if (!mdc800) goto 
cleanup_on_fail; mdc800->dev = NULL; mdc800->state=NOT_CONNECTED; mutex_init (&mdc800->io_lock); init_waitqueue_head (&mdc800->irq_wait); init_waitqueue_head (&mdc800->write_wait); init_waitqueue_head (&mdc800->download_wait); mdc800->irq_woken = 0; mdc800->downloaded = 0; mdc800->written = 0; mdc800->irq_urb_buffer=kmalloc (8, GFP_KERNEL); if (!mdc800->irq_urb_buffer) goto cleanup_on_fail; mdc800->write_urb_buffer=kmalloc (8, GFP_KERNEL); if (!mdc800->write_urb_buffer) goto cleanup_on_fail; mdc800->download_urb_buffer=kmalloc (64, GFP_KERNEL); if (!mdc800->download_urb_buffer) goto cleanup_on_fail; mdc800->irq_urb=usb_alloc_urb (0, GFP_KERNEL); if (!mdc800->irq_urb) goto cleanup_on_fail; mdc800->download_urb=usb_alloc_urb (0, GFP_KERNEL); if (!mdc800->download_urb) goto cleanup_on_fail; mdc800->write_urb=usb_alloc_urb (0, GFP_KERNEL); if (!mdc800->write_urb) goto cleanup_on_fail; /* Register the driver */ retval = usb_register(&mdc800_usb_driver); if (retval) goto cleanup_on_fail; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; /* Clean driver up, when something fails */ cleanup_on_fail: if (mdc800 != NULL) { printk(KERN_ERR "mdc800: can't alloc memory!\n"); kfree(mdc800->download_urb_buffer); kfree(mdc800->write_urb_buffer); kfree(mdc800->irq_urb_buffer); usb_free_urb(mdc800->write_urb); usb_free_urb(mdc800->download_urb); usb_free_urb(mdc800->irq_urb); kfree (mdc800); } mdc800 = NULL; return retval; } static void __exit usb_mdc800_cleanup (void) { usb_deregister (&mdc800_usb_driver); usb_free_urb (mdc800->irq_urb); usb_free_urb (mdc800->download_urb); usb_free_urb (mdc800->write_urb); kfree (mdc800->irq_urb_buffer); kfree (mdc800->write_urb_buffer); kfree (mdc800->download_urb_buffer); kfree (mdc800); mdc800 = NULL; } module_init (usb_mdc800_init); module_exit (usb_mdc800_cleanup); MODULE_AUTHOR( DRIVER_AUTHOR ); MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_LICENSE("GPL"); |
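One detail of the write path above that is easy to miss: after a get-image-size command (0x07) the camera's answer carries the size as a 3-byte big-endian value, which the driver caches as pic_len = 65536*b[0] + 256*b[1] + b[2] and later uses as the answer size of the download commands (0x05, 0x3e). A stand-alone restatement of that decoding; the function name and the sample bytes are illustrative only:

/* Decode the image size from the first three bytes of the camera's
 * response, exactly as the driver computes pic_len. */
static int mdc800_decode_pic_len(const unsigned char *resp)
{
	return 65536 * resp[0] + 256 * resp[1] + resp[2];
}

/* Example with made-up response bytes: {0x01, 0x2c, 0x40} decodes to
 * 65536*1 + 256*44 + 64 = 76864 bytes, which is how many bytes the
 * following download will deliver. */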
// SPDX-License-Identifier: GPL-2.0-only /* * Generic show_mem() implementation * * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> */ #include <linux/blkdev.h> #include <linux/cma.h> #include <linux/cpuset.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/swap.h> #include <linux/vmstat.h> #include "internal.h" #include "swap.h" atomic_long_t _totalram_pages __read_mostly; EXPORT_SYMBOL(_totalram_pages); unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; static inline void show_node(struct zone *zone) { if (IS_ENABLED(CONFIG_NUMA)) printk("Node %d ", zone_to_nid(zone)); } long si_mem_available(void) { long available; unsigned long pagecache; unsigned long wmark_low = 0; unsigned long reclaimable; struct zone *zone; for_each_zone(zone) wmark_low += low_wmark_pages(zone); /* * Estimate the amount of memory available for userspace allocations, * without causing swapping or OOM. */ available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; /* * Not all the page cache can be freed, otherwise the system will * start swapping or thrashing. Assume at least half of the page * cache, or the low watermark worth of cache, needs to stay. */ pagecache = global_node_page_state(NR_ACTIVE_FILE) + global_node_page_state(NR_INACTIVE_FILE); pagecache -= min(pagecache / 2, wmark_low); available += pagecache; /* * Part of the reclaimable slab and other kernel memory consists of * items that are in use, and cannot be freed. Cap this estimate at the * low watermark.
*/ reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); reclaimable -= min(reclaimable / 2, wmark_low); available += reclaimable; if (available < 0) available = 0; return available; } EXPORT_SYMBOL_GPL(si_mem_available); void si_meminfo(struct sysinfo *val) { val->totalram = totalram_pages(); val->sharedram = global_node_page_state(NR_SHMEM); val->freeram = global_zone_page_state(NR_FREE_PAGES); val->bufferram = nr_blockdev_pages(); val->totalhigh = totalhigh_pages(); val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; } EXPORT_SYMBOL(si_meminfo); #ifdef CONFIG_NUMA void si_meminfo_node(struct sysinfo *val, int nid) { int zone_type; /* needs to be signed */ unsigned long managed_pages = 0; unsigned long managed_highpages = 0; unsigned long free_highpages = 0; pg_data_t *pgdat = NODE_DATA(nid); for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { struct zone *zone = &pgdat->node_zones[zone_type]; managed_pages += zone_managed_pages(zone); if (is_highmem(zone)) { managed_highpages += zone_managed_pages(zone); free_highpages += zone_page_state(zone, NR_FREE_PAGES); } } val->totalram = managed_pages; val->sharedram = node_page_state(pgdat, NR_SHMEM); val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); val->totalhigh = managed_highpages; val->freehigh = free_highpages; val->mem_unit = PAGE_SIZE; } #endif /* * Determine whether the node should be displayed or not, depending on whether * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). */ static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) { if (!(flags & SHOW_MEM_FILTER_NODES)) return false; /* * no node mask - aka implicit memory numa policy. Do not bother with * the synchronization - read_mems_allowed_begin - because we do not * have to be precise here. */ if (!nodemask) nodemask = &cpuset_current_mems_allowed; return !node_isset(nid, *nodemask); } static void show_migration_types(unsigned char type) { static const char types[MIGRATE_TYPES] = { [MIGRATE_UNMOVABLE] = 'U', [MIGRATE_MOVABLE] = 'M', [MIGRATE_RECLAIMABLE] = 'E', [MIGRATE_HIGHATOMIC] = 'H', #ifdef CONFIG_CMA [MIGRATE_CMA] = 'C', #endif #ifdef CONFIG_MEMORY_ISOLATION [MIGRATE_ISOLATE] = 'I', #endif }; char tmp[MIGRATE_TYPES + 1]; char *p = tmp; int i; for (i = 0; i < MIGRATE_TYPES; i++) { if (type & (1 << i)) *p++ = types[i]; } *p = '\0'; printk(KERN_CONT "(%s) ", tmp); } static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) { int zone_idx; for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) if (zone_managed_pages(pgdat->node_zones + zone_idx)) return true; return false; } /* * Show free area list (used inside shift_scroll-lock stuff) * We also calculate the percentage fragmentation. We do this by counting the * memory on each free list with the exception of the first item on the list. * * Bits in @filter: * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's * cpuset. 
*/ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) { unsigned long free_pcp = 0; int cpu, nid; struct zone *zone; pg_data_t *pgdat; for_each_populated_zone(zone) { if (zone_idx(zone) > max_zone_idx) continue; if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) continue; for_each_online_cpu(cpu) free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; } printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" " active_file:%lu inactive_file:%lu isolated_file:%lu\n" " unevictable:%lu dirty:%lu writeback:%lu\n" " slab_reclaimable:%lu slab_unreclaimable:%lu\n" " mapped:%lu shmem:%lu pagetables:%lu\n" " sec_pagetables:%lu bounce:%lu\n" " kernel_misc_reclaimable:%lu\n" " free:%lu free_pcp:%lu free_cma:%lu\n", global_node_page_state(NR_ACTIVE_ANON), global_node_page_state(NR_INACTIVE_ANON), global_node_page_state(NR_ISOLATED_ANON), global_node_page_state(NR_ACTIVE_FILE), global_node_page_state(NR_INACTIVE_FILE), global_node_page_state(NR_ISOLATED_FILE), global_node_page_state(NR_UNEVICTABLE), global_node_page_state(NR_FILE_DIRTY), global_node_page_state(NR_WRITEBACK), global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), global_node_page_state(NR_FILE_MAPPED), global_node_page_state(NR_SHMEM), global_node_page_state(NR_PAGETABLE), global_node_page_state(NR_SECONDARY_PAGETABLE), 0UL, global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), global_zone_page_state(NR_FREE_PAGES), free_pcp, global_zone_page_state(NR_FREE_CMA_PAGES)); for_each_online_pgdat(pgdat) { if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) continue; if (!node_has_managed_zones(pgdat, max_zone_idx)) continue; printk("Node %d" " active_anon:%lukB" " inactive_anon:%lukB" " active_file:%lukB" " inactive_file:%lukB" " unevictable:%lukB" " isolated(anon):%lukB" " isolated(file):%lukB" " mapped:%lukB" " dirty:%lukB" " writeback:%lukB" " shmem:%lukB" #ifdef CONFIG_TRANSPARENT_HUGEPAGE " shmem_thp:%lukB" " shmem_pmdmapped:%lukB" " anon_thp:%lukB" #endif " writeback_tmp:%lukB" " kernel_stack:%lukB" #ifdef CONFIG_SHADOW_CALL_STACK " shadow_call_stack:%lukB" #endif " pagetables:%lukB" " sec_pagetables:%lukB" " all_unreclaimable? 
%s" " Balloon:%lukB" "\n", pgdat->node_id, K(node_page_state(pgdat, NR_ACTIVE_ANON)), K(node_page_state(pgdat, NR_INACTIVE_ANON)), K(node_page_state(pgdat, NR_ACTIVE_FILE)), K(node_page_state(pgdat, NR_INACTIVE_FILE)), K(node_page_state(pgdat, NR_UNEVICTABLE)), K(node_page_state(pgdat, NR_ISOLATED_ANON)), K(node_page_state(pgdat, NR_ISOLATED_FILE)), K(node_page_state(pgdat, NR_FILE_MAPPED)), K(node_page_state(pgdat, NR_FILE_DIRTY)), K(node_page_state(pgdat, NR_WRITEBACK)), K(node_page_state(pgdat, NR_SHMEM)), #ifdef CONFIG_TRANSPARENT_HUGEPAGE K(node_page_state(pgdat, NR_SHMEM_THPS)), K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), K(node_page_state(pgdat, NR_ANON_THPS)), #endif K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), node_page_state(pgdat, NR_KERNEL_STACK_KB), #ifdef CONFIG_SHADOW_CALL_STACK node_page_state(pgdat, NR_KERNEL_SCS_KB), #endif K(node_page_state(pgdat, NR_PAGETABLE)), K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES), K(node_page_state(pgdat, NR_BALLOON_PAGES))); } for_each_populated_zone(zone) { int i; if (zone_idx(zone) > max_zone_idx) continue; if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) continue; free_pcp = 0; for_each_online_cpu(cpu) free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; show_node(zone); printk(KERN_CONT "%s" " free:%lukB" " boost:%lukB" " min:%lukB" " low:%lukB" " high:%lukB" " reserved_highatomic:%luKB" " free_highatomic:%luKB" " active_anon:%lukB" " inactive_anon:%lukB" " active_file:%lukB" " inactive_file:%lukB" " unevictable:%lukB" " writepending:%lukB" " present:%lukB" " managed:%lukB" " mlocked:%lukB" " bounce:%lukB" " free_pcp:%lukB" " local_pcp:%ukB" " free_cma:%lukB" "\n", zone->name, K(zone_page_state(zone, NR_FREE_PAGES)), K(zone->watermark_boost), K(min_wmark_pages(zone)), K(low_wmark_pages(zone)), K(high_wmark_pages(zone)), K(zone->nr_reserved_highatomic), K(zone->nr_free_highatomic), K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), K(zone->present_pages), K(zone_managed_pages(zone)), K(zone_page_state(zone, NR_MLOCK)), 0UL, K(free_pcp), K(this_cpu_read(zone->per_cpu_pageset->count)), K(zone_page_state(zone, NR_FREE_CMA_PAGES))); printk("lowmem_reserve[]:"); for (i = 0; i < MAX_NR_ZONES; i++) printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); printk(KERN_CONT "\n"); } for_each_populated_zone(zone) { unsigned int order; unsigned long nr[NR_PAGE_ORDERS], flags, total = 0; unsigned char types[NR_PAGE_ORDERS]; if (zone_idx(zone) > max_zone_idx) continue; if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) continue; show_node(zone); printk(KERN_CONT "%s: ", zone->name); spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &zone->free_area[order]; int type; nr[order] = area->nr_free; total += nr[order] << order; types[order] = 0; for (type = 0; type < MIGRATE_TYPES; type++) { if (!free_area_empty(area, type)) types[order] |= 1 << type; } } spin_unlock_irqrestore(&zone->lock, flags); for (order = 0; order < NR_PAGE_ORDERS; order++) { printk(KERN_CONT "%lu*%lukB ", nr[order], K(1UL) << order); if (nr[order]) show_migration_types(types[order]); } printk(KERN_CONT "= %lukB\n", K(total)); } for_each_online_node(nid) { if (show_mem_node_skip(filter, nid, nodemask)) 
continue; hugetlb_show_meminfo_node(nid); } printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); show_swap_cache_info(); } void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) { unsigned long total = 0, reserved = 0, highmem = 0; struct zone *zone; printk("Mem-Info:\n"); show_free_areas(filter, nodemask, max_zone_idx); for_each_populated_zone(zone) { total += zone->present_pages; reserved += zone->present_pages - zone_managed_pages(zone); if (is_highmem(zone)) highmem += zone->present_pages; } printk("%lu pages RAM\n", total); printk("%lu pages HighMem/MovableOnly\n", highmem); printk("%lu pages reserved\n", reserved); #ifdef CONFIG_CMA printk("%lu pages cma reserved\n", totalcma_pages); #endif #ifdef CONFIG_MEMORY_FAILURE printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); #endif #ifdef CONFIG_MEM_ALLOC_PROFILING { struct codetag_bytes tags[10]; size_t i, nr; nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false); if (nr) { pr_notice("Memory allocations:\n"); for (i = 0; i < nr; i++) { struct codetag *ct = tags[i].ct; struct alloc_tag *tag = ct_to_alloc_tag(ct); struct alloc_tag_counters counter = alloc_tag_read(tag); char bytes[10]; string_get_size(counter.bytes, 1, STRING_UNITS_2, bytes, sizeof(bytes)); /* Same as alloc_tag_to_text() but w/o intermediate buffer */ if (ct->modname) pr_notice("%12s %8llu %s:%u [%s] func:%s\n", bytes, counter.calls, ct->filename, ct->lineno, ct->modname, ct->function); else pr_notice("%12s %8llu %s:%u func:%s\n", bytes, counter.calls, ct->filename, ct->lineno, ct->function); } } } #endif } |
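The estimate computed by si_mem_available() above boils down to a short formula: free pages minus the total reserve, plus the parts of the page cache and of reclaimable kernel memory that can be given up without eating into the low watermark. A stand-alone restatement of that arithmetic in plain C, with illustrative names and all quantities in pages:

/* Sketch of the si_mem_available() arithmetic above; not kernel code. */
static long mem_available_estimate(long free_pages, long total_reserve,
				   long pagecache, long reclaimable,
				   long wmark_low)
{
	long available = free_pages - total_reserve;

	/* at least half the page cache, or the low watermark, must stay */
	pagecache -= (pagecache / 2 < wmark_low) ? pagecache / 2 : wmark_low;
	available += pagecache;

	/* the same cap applies to reclaimable slab and misc kernel memory */
	reclaimable -= (reclaimable / 2 < wmark_low) ? reclaimable / 2 : wmark_low;
	available += reclaimable;

	return available < 0 ? 0 : available;
}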
/* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/signalfd.h * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * */ #ifndef _LINUX_SIGNALFD_H #define _LINUX_SIGNALFD_H #include <uapi/linux/signalfd.h> #include <linux/sched/signal.h> #ifdef CONFIG_SIGNALFD /* * Deliver the signal to listening signalfd. */ static inline void signalfd_notify(struct task_struct *tsk, int sig) { if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh))) wake_up(&tsk->sighand->signalfd_wqh); } extern void signalfd_cleanup(struct sighand_struct *sighand); #else /* CONFIG_SIGNALFD */ static inline void signalfd_notify(struct task_struct *tsk, int sig) { } static inline void signalfd_cleanup(struct sighand_struct *sighand) { } #endif /* CONFIG_SIGNALFD */ #endif /* _LINUX_SIGNALFD_H */ |
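The wakeup in signalfd_notify() above is what unblocks a userspace reader sitting in read(2) on a signal file descriptor. For context, a minimal consumer of that interface looks roughly like this; it is ordinary signalfd(2) usage and not part of this header:

#include <sys/signalfd.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo si;
	int sfd;

	/* Block SIGINT so it is delivered through the signalfd
	 * instead of an asynchronous handler. */
	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	sfd = signalfd(-1, &mask, 0);
	if (sfd < 0)
		return 1;

	/* read() blocks until the kernel side calls signalfd_notify()
	 * and wakes sighand->signalfd_wqh. */
	if (read(sfd, &si, sizeof(si)) == sizeof(si))
		printf("got signal %u\n", si.ssi_signo);

	close(sfd);
	return 0;
}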
17 17 13 17 17 17 17 17 17 17 17 165 165 165 165 165 165 164 165 165 165 165 165 165 165 165 165 165 165 164 165 165 164 165 165 165 165 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 5 13 13 13 17 17 17 17 17 17 17 17 17 17 17 169 169 169 168 169 17 17 17 17 17 17 17 17 17 17 17 17 13 13 13 12 13 12 12 12 12 12 12 11 12 13 13 13 13 13 12 117 17 17 13 12 17 117 16 17 17 16 17 17 17 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 
// SPDX-License-Identifier: GPL-2.0-only /* * This is the linux wireless configuration interface. * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2025 Intel Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/if.h> #include <linux/module.h> #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/nl80211.h> #include <linux/debugfs.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/sched.h> #include <net/genetlink.h> #include <net/cfg80211.h> #include "nl80211.h" #include "core.h" #include "sysfs.h" #include "debugfs.h" #include "wext-compat.h" #include "rdev-ops.h" /* name for sysfs, %d is appended */ #define PHY_NAME "phy" MODULE_AUTHOR("Johannes Berg"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); MODULE_ALIAS_GENL_FAMILY(NL80211_GENL_NAME); /* RCU-protected (and RTNL for writers) */ LIST_HEAD(cfg80211_rdev_list); int cfg80211_rdev_list_generation; /* for debugfs */ static struct dentry *ieee80211_debugfs_dir; /* for the cleanup, scan and event works */ struct workqueue_struct *cfg80211_wq; static bool cfg80211_disable_40mhz_24ghz; module_param(cfg80211_disable_40mhz_24ghz, bool, 0644); MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz, "Disable 40MHz support in the 2.4GHz band"); struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) { struct cfg80211_registered_device *result = NULL, *rdev; ASSERT_RTNL(); for_each_rdev(rdev) { if (rdev->wiphy_idx == wiphy_idx) { result = rdev; break; } } return result; } int get_wiphy_idx(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); return rdev->wiphy_idx; } struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); if (!rdev) return NULL; return &rdev->wiphy; } static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev, const char *newname)
{ struct cfg80211_registered_device *rdev2; int wiphy_idx, taken = -1, digits; ASSERT_RTNL(); if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN) return -EINVAL; /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { /* count number of places needed to print wiphy_idx */ digits = 1; while (wiphy_idx /= 10) digits++; /* * deny the name if it is phy<idx> where <idx> is printed * without leading zeroes. taken == strlen(newname) here */ if (taken == strlen(PHY_NAME) + digits) return -EINVAL; } /* Ensure another device does not already have this name. */ for_each_rdev(rdev2) if (strcmp(newname, wiphy_name(&rdev2->wiphy)) == 0) return -EINVAL; return 0; } int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname) { int result; ASSERT_RTNL(); lockdep_assert_wiphy(&rdev->wiphy); /* Ignore nop renames */ if (strcmp(newname, wiphy_name(&rdev->wiphy)) == 0) return 0; result = cfg80211_dev_check_name(rdev, newname); if (result < 0) return result; result = device_rename(&rdev->wiphy.dev, newname); if (result) return result; debugfs_change_name(rdev->wiphy.debugfsdir, "%s", newname); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); return 0; } int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net) { struct wireless_dev *wdev; int err = 0; if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK)) return -EOPNOTSUPP; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->netns_immutable = false; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); if (err) break; wdev->netdev->netns_immutable = true; } if (err) { /* failed -- clean up to old netns */ net = wiphy_net(&rdev->wiphy); list_for_each_entry_continue_reverse(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->netns_immutable = false; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); WARN_ON(err); wdev->netdev->netns_immutable = true; } return err; } guard(wiphy)(&rdev->wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); } nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); wiphy_net_set(&rdev->wiphy, net); err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev)); WARN_ON(err); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); } return 0; } static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) { struct cfg80211_registered_device *rdev = data; guard(wiphy)(&rdev->wiphy); rdev_rfkill_poll(rdev); } void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { lockdep_assert_held(&rdev->wiphy.mtx); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) return; if (!wdev_running(wdev)) return; rdev_stop_p2p_device(rdev, wdev); wdev->is_running = false; rdev->opencount--; if (rdev->scan_req && rdev->scan_req->wdev == wdev) { if (WARN_ON(!rdev->scan_req->notified && (!rdev->int_scan_req || !rdev->int_scan_req->notified))) rdev->scan_req->info.aborted = true; ___cfg80211_scan_done(rdev, false); } } void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { lockdep_assert_held(&rdev->wiphy.mtx); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_NAN)) return; if 
(!wdev_running(wdev)) return; rdev_stop_nan(rdev, wdev); wdev->is_running = false; rdev->opencount--; } void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct wireless_dev *wdev; ASSERT_RTNL(); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (wdev->netdev) { dev_close(wdev->netdev); continue; } /* otherwise, check iftype */ guard(wiphy)(wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: cfg80211_stop_p2p_device(rdev, wdev); break; case NL80211_IFTYPE_NAN: cfg80211_stop_nan(rdev, wdev); break; default: break; } } } EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces); static int cfg80211_rfkill_set_block(void *data, bool blocked) { struct cfg80211_registered_device *rdev = data; if (!blocked) return 0; rtnl_lock(); cfg80211_shutdown_all_interfaces(&rdev->wiphy); rtnl_unlock(); return 0; } static void cfg80211_rfkill_block_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, rfkill_block); cfg80211_rfkill_set_block(rdev, true); } static void cfg80211_event_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, event_work); guard(wiphy)(&rdev->wiphy); cfg80211_process_rdev_events(rdev); } void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev, *tmp; ASSERT_RTNL(); list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) { if (wdev->nl_owner_dead) { if (wdev->netdev) dev_close(wdev->netdev); guard(wiphy)(&rdev->wiphy); cfg80211_leave(rdev, wdev); cfg80211_remove_virtual_intf(rdev, wdev); } } } static void cfg80211_destroy_iface_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, destroy_work); rtnl_lock(); cfg80211_destroy_ifaces(rdev); rtnl_unlock(); } static void cfg80211_sched_scan_stop_wk(struct wiphy *wiphy, struct wiphy_work *work) { struct cfg80211_registered_device *rdev; struct cfg80211_sched_scan_request *req, *tmp; rdev = container_of(work, struct cfg80211_registered_device, sched_scan_stop_wk); list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { if (req->nl_owner_dead) cfg80211_stop_sched_scan_req(rdev, req, false); } } static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, propagate_radar_detect_wk); rtnl_lock(); regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->radar_chandef, NL80211_DFS_UNAVAILABLE, NL80211_RADAR_DETECTED); rtnl_unlock(); } static void cfg80211_propagate_cac_done_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, propagate_cac_done_wk); rtnl_lock(); regulatory_propagate_dfs_state(&rdev->wiphy, &rdev->cac_done_chandef, NL80211_DFS_AVAILABLE, NL80211_RADAR_CAC_FINISHED); rtnl_unlock(); } static void cfg80211_wiphy_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; struct wiphy_work *wk; rdev = container_of(work, struct cfg80211_registered_device, wiphy_work); trace_wiphy_work_worker_start(&rdev->wiphy); guard(wiphy)(&rdev->wiphy); if (rdev->suspended) return; spin_lock_irq(&rdev->wiphy_work_lock); wk = list_first_entry_or_null(&rdev->wiphy_work_list, struct wiphy_work, entry); if (wk) { list_del_init(&wk->entry); if 
(!list_empty(&rdev->wiphy_work_list)) queue_work(system_unbound_wq, work); spin_unlock_irq(&rdev->wiphy_work_lock); trace_wiphy_work_run(&rdev->wiphy, wk); wk->func(&rdev->wiphy, wk); } else { spin_unlock_irq(&rdev->wiphy_work_lock); } } /* exported functions */ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, const char *requested_name) { static atomic_t wiphy_counter = ATOMIC_INIT(0); struct cfg80211_registered_device *rdev; int alloc_size; WARN_ON(ops->add_key && (!ops->del_key || !ops->set_default_key)); WARN_ON(ops->auth && (!ops->assoc || !ops->deauth || !ops->disassoc)); WARN_ON(ops->connect && !ops->disconnect); WARN_ON(ops->join_ibss && !ops->leave_ibss); WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf); WARN_ON(ops->add_station && !ops->del_station); WARN_ON(ops->add_mpath && !ops->del_mpath); WARN_ON(ops->join_mesh && !ops->leave_mesh); WARN_ON(ops->start_p2p_device && !ops->stop_p2p_device); WARN_ON(ops->start_ap && !ops->stop_ap); WARN_ON(ops->join_ocb && !ops->leave_ocb); WARN_ON(ops->suspend && !ops->resume); WARN_ON(ops->sched_scan_start && !ops->sched_scan_stop); WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); alloc_size = sizeof(*rdev) + sizeof_priv; rdev = kzalloc(alloc_size, GFP_KERNEL); if (!rdev) return NULL; rdev->ops = ops; rdev->wiphy_idx = atomic_inc_return(&wiphy_counter); if (unlikely(rdev->wiphy_idx < 0)) { /* ugh, wrapped! */ atomic_dec(&wiphy_counter); kfree(rdev); return NULL; } /* atomic_inc_return makes it start at 1, make it start at 0 */ rdev->wiphy_idx--; /* give it a proper name */ if (requested_name && requested_name[0]) { int rv; rtnl_lock(); rv = cfg80211_dev_check_name(rdev, requested_name); if (rv < 0) { rtnl_unlock(); goto use_default_name; } rv = dev_set_name(&rdev->wiphy.dev, "%s", requested_name); rtnl_unlock(); if (rv) goto use_default_name; } else { int rv; use_default_name: /* NOTE: This is *probably* safe w/out holding rtnl because of * the restrictions on phy names. Probably this call could * fail if some other part of the kernel (re)named a device * phyX. But, might should add some locking and check return * value, and use a different name if this one exists? 
*/ rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); if (rv < 0) { kfree(rdev); return NULL; } } mutex_init(&rdev->wiphy.mtx); INIT_LIST_HEAD(&rdev->wiphy.wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); spin_lock_init(&rdev->beacon_registrations_lock); spin_lock_init(&rdev->bss_lock); INIT_LIST_HEAD(&rdev->bss_list); INIT_LIST_HEAD(&rdev->sched_scan_req_list); wiphy_work_init(&rdev->scan_done_wk, __cfg80211_scan_done); INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk, cfg80211_dfs_channels_update_work); #ifdef CONFIG_CFG80211_WEXT rdev->wiphy.wext = &cfg80211_wext_handler; #endif device_initialize(&rdev->wiphy.dev); rdev->wiphy.dev.class = &ieee80211_class; rdev->wiphy.dev.platform_data = rdev; device_enable_async_suspend(&rdev->wiphy.dev); INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk); wiphy_work_init(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk); INIT_WORK(&rdev->sched_scan_res_wk, cfg80211_sched_scan_results_wk); INIT_WORK(&rdev->propagate_radar_detect_wk, cfg80211_propagate_radar_detect_wk); INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); INIT_WORK(&rdev->mgmt_registrations_update_wk, cfg80211_mgmt_registrations_update_wk); spin_lock_init(&rdev->mgmt_registrations_lock); INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); INIT_LIST_HEAD(&rdev->wiphy_work_list); spin_lock_init(&rdev->wiphy_work_lock); #ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; #endif wiphy_net_set(&rdev->wiphy, &init_net); rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; rdev->wiphy.rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev), &rdev->wiphy.dev, RFKILL_TYPE_WLAN, &rdev->rfkill_ops, rdev); if (!rdev->wiphy.rfkill) { wiphy_free(&rdev->wiphy); return NULL; } INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); INIT_WORK(&rdev->background_cac_abort_wk, cfg80211_background_cac_abort_wk); INIT_DELAYED_WORK(&rdev->background_cac_done_wk, cfg80211_background_cac_done_wk); init_waitqueue_head(&rdev->dev_wait); /* * Initialize wiphy parameters to IEEE 802.11 MIB default values. * Fragmentation and RTS threshold are disabled by default with the * special -1 value. */ rdev->wiphy.retry_short = 7; rdev->wiphy.retry_long = 4; rdev->wiphy.frag_threshold = (u32) -1; rdev->wiphy.rts_threshold = (u32) -1; rdev->wiphy.coverage_class = 0; rdev->wiphy.max_num_csa_counters = 1; rdev->wiphy.max_sched_scan_plans = 1; rdev->wiphy.max_sched_scan_plan_interval = U32_MAX; return &rdev->wiphy; } EXPORT_SYMBOL(wiphy_new_nm); static int wiphy_verify_iface_combinations(struct wiphy *wiphy, const struct ieee80211_iface_combination *iface_comb, int n_iface_comb, bool combined_radio) { const struct ieee80211_iface_combination *c; int i, j; for (i = 0; i < n_iface_comb; i++) { u32 cnt = 0; u16 all_iftypes = 0; c = &iface_comb[i]; /* * Combinations with just one interface aren't real, * however we make an exception for DFS. */ if (WARN_ON((c->max_interfaces < 2) && !c->radar_detect_widths)) return -EINVAL; /* Need at least one channel */ if (WARN_ON(!c->num_different_channels)) return -EINVAL; /* DFS only works on one channel. Avoid this check * for multi-radio global combination, since it hold * the capabilities of all radio combinations. 
*/ if (!combined_radio && WARN_ON(c->radar_detect_widths && c->num_different_channels > 1)) return -EINVAL; if (WARN_ON(!c->n_limits)) return -EINVAL; for (j = 0; j < c->n_limits; j++) { u16 types = c->limits[j].types; /* interface types shouldn't overlap */ if (WARN_ON(types & all_iftypes)) return -EINVAL; all_iftypes |= types; if (WARN_ON(!c->limits[j].max)) return -EINVAL; /* Shouldn't list software iftypes in combinations! */ if (WARN_ON(wiphy->software_iftypes & types)) return -EINVAL; /* Only a single P2P_DEVICE can be allowed, avoid this * check for multi-radio global combination, since it * hold the capabilities of all radio combinations. */ if (!combined_radio && WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) && c->limits[j].max > 1)) return -EINVAL; /* Only a single NAN can be allowed, avoid this * check for multi-radio global combination, since it * hold the capabilities of all radio combinations. */ if (!combined_radio && WARN_ON(types & BIT(NL80211_IFTYPE_NAN) && c->limits[j].max > 1)) return -EINVAL; /* * This isn't well-defined right now. If you have an * IBSS interface, then its beacon interval may change * by joining other networks, and nothing prevents it * from doing that. * So technically we probably shouldn't even allow AP * and IBSS in the same interface, but it seems that * some drivers support that, possibly only with fixed * beacon intervals for IBSS. */ if (WARN_ON(types & BIT(NL80211_IFTYPE_ADHOC) && c->beacon_int_min_gcd)) { return -EINVAL; } cnt += c->limits[j].max; /* * Don't advertise an unsupported type * in a combination. */ if (WARN_ON((wiphy->interface_modes & types) != types)) return -EINVAL; } if (WARN_ON(all_iftypes & BIT(NL80211_IFTYPE_WDS))) return -EINVAL; /* You can't even choose that many! */ if (WARN_ON(cnt < c->max_interfaces)) return -EINVAL; } return 0; } static int wiphy_verify_combinations(struct wiphy *wiphy) { int i, ret; bool combined_radio = false; if (wiphy->n_radio) { for (i = 0; i < wiphy->n_radio; i++) { const struct wiphy_radio *radio = &wiphy->radio[i]; ret = wiphy_verify_iface_combinations(wiphy, radio->iface_combinations, radio->n_iface_combinations, false); if (ret) return ret; } combined_radio = true; } ret = wiphy_verify_iface_combinations(wiphy, wiphy->iface_combinations, wiphy->n_iface_combinations, combined_radio); return ret; } int wiphy_register(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); int res; enum nl80211_band band; struct ieee80211_supported_band *sband; bool have_band = false; int i; u16 ifmodes = wiphy->interface_modes; #ifdef CONFIG_PM if (WARN_ON(wiphy->wowlan && (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && !(wiphy->wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) return -EINVAL; if (WARN_ON(wiphy->wowlan && !wiphy->wowlan->flags && !wiphy->wowlan->n_patterns && !wiphy->wowlan->tcp)) return -EINVAL; #endif if (WARN_ON((wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && (!rdev->ops->tdls_channel_switch || !rdev->ops->tdls_cancel_channel_switch))) return -EINVAL; if (WARN_ON((wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN)) && (!rdev->ops->start_nan || !rdev->ops->stop_nan || !rdev->ops->add_nan_func || !rdev->ops->del_nan_func || !(wiphy->nan_supported_bands & BIT(NL80211_BAND_2GHZ))))) return -EINVAL; if (WARN_ON(wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))) return -EINVAL; if (WARN_ON(wiphy->pmsr_capa && !wiphy->pmsr_capa->ftm.supported)) return -EINVAL; if (wiphy->pmsr_capa && wiphy->pmsr_capa->ftm.supported) { if 
(WARN_ON(!wiphy->pmsr_capa->ftm.asap && !wiphy->pmsr_capa->ftm.non_asap)) return -EINVAL; if (WARN_ON(!wiphy->pmsr_capa->ftm.preambles || !wiphy->pmsr_capa->ftm.bandwidths)) return -EINVAL; if (WARN_ON(wiphy->pmsr_capa->ftm.preambles & ~(BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT) | BIT(NL80211_PREAMBLE_HE) | BIT(NL80211_PREAMBLE_DMG)))) return -EINVAL; if (WARN_ON((wiphy->pmsr_capa->ftm.trigger_based || wiphy->pmsr_capa->ftm.non_trigger_based) && !(wiphy->pmsr_capa->ftm.preambles & BIT(NL80211_PREAMBLE_HE)))) return -EINVAL; if (WARN_ON(wiphy->pmsr_capa->ftm.bandwidths & ~(BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_80P80) | BIT(NL80211_CHAN_WIDTH_160) | BIT(NL80211_CHAN_WIDTH_320) | BIT(NL80211_CHAN_WIDTH_5) | BIT(NL80211_CHAN_WIDTH_10)))) return -EINVAL; } if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) && (wiphy->regulatory_flags & (REGULATORY_CUSTOM_REG | REGULATORY_STRICT_REG | REGULATORY_COUNTRY_IE_FOLLOW_POWER | REGULATORY_COUNTRY_IE_IGNORE)))) return -EINVAL; if (WARN_ON(wiphy->coalesce && (!wiphy->coalesce->n_rules || !wiphy->coalesce->n_patterns) && (!wiphy->coalesce->pattern_min_len || wiphy->coalesce->pattern_min_len > wiphy->coalesce->pattern_max_len))) return -EINVAL; if (WARN_ON(wiphy->ap_sme_capa && !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME))) return -EINVAL; if (WARN_ON(wiphy->addresses && !wiphy->n_addresses)) return -EINVAL; if (WARN_ON(wiphy->addresses && !is_zero_ether_addr(wiphy->perm_addr) && memcmp(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN))) return -EINVAL; if (WARN_ON(wiphy->max_acl_mac_addrs && (!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME) || !rdev->ops->set_mac_acl))) return -EINVAL; /* assure only valid behaviours are flagged by driver * hence subtract 2 as bit 0 is invalid. */ if (WARN_ON(wiphy->bss_select_support && (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2)))) return -EINVAL; if (WARN_ON(wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X) && (!rdev->ops->set_pmk || !rdev->ops->del_pmk))) return -EINVAL; if (WARN_ON(!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && rdev->ops->update_connect_params)) return -EINVAL; if (wiphy->addresses) memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); /* sanity check ifmodes */ WARN_ON(!ifmodes); ifmodes &= ((1 << NUM_NL80211_IFTYPES) - 1) & ~1; if (WARN_ON(ifmodes != wiphy->interface_modes)) wiphy->interface_modes = ifmodes; res = wiphy_verify_combinations(wiphy); if (res) return res; /* sanity check supported bands/channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { const struct ieee80211_sband_iftype_data *iftd; u16 types = 0; bool have_he = false; sband = wiphy->bands[band]; if (!sband) continue; sband->band = band; if (WARN_ON(!sband->n_channels)) return -EINVAL; /* * on 60GHz or sub-1Ghz band, there are no legacy rates, so * n_bitrates is 0 */ if (WARN_ON((band != NL80211_BAND_60GHZ && band != NL80211_BAND_S1GHZ) && !sband->n_bitrates)) return -EINVAL; if (WARN_ON(band == NL80211_BAND_6GHZ && (sband->ht_cap.ht_supported || sband->vht_cap.vht_supported))) return -EINVAL; /* * Since cfg80211_disable_40mhz_24ghz is global, we can * modify the sband's ht data even if the driver uses a * global structure for that. 
*/ if (cfg80211_disable_40mhz_24ghz && band == NL80211_BAND_2GHZ && sband->ht_cap.ht_supported) { sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; } /* * Since we use a u32 for rate bitmaps in * ieee80211_get_response_rate, we cannot * have more than 32 legacy rates. */ if (WARN_ON(sband->n_bitrates > 32)) return -EINVAL; for (i = 0; i < sband->n_channels; i++) { sband->channels[i].orig_flags = sband->channels[i].flags; sband->channels[i].orig_mag = INT_MAX; sband->channels[i].orig_mpwr = sband->channels[i].max_power; sband->channels[i].band = band; if (WARN_ON(sband->channels[i].freq_offset >= 1000)) return -EINVAL; } for_each_sband_iftype_data(sband, i, iftd) { bool has_ap, has_non_ap; u32 ap_bits = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO); if (WARN_ON(!iftd->types_mask)) return -EINVAL; if (WARN_ON(types & iftd->types_mask)) return -EINVAL; /* at least one piece of information must be present */ if (WARN_ON(!iftd->he_cap.has_he)) return -EINVAL; types |= iftd->types_mask; if (i == 0) have_he = iftd->he_cap.has_he; else have_he = have_he && iftd->he_cap.has_he; has_ap = iftd->types_mask & ap_bits; has_non_ap = iftd->types_mask & ~ap_bits; /* * For EHT 20 MHz STA, the capabilities format differs * but to simplify, don't check 20 MHz but rather check * only if AP and non-AP were mentioned at the same time, * reject if so. */ if (WARN_ON(iftd->eht_cap.has_eht && has_ap && has_non_ap)) return -EINVAL; } if (WARN_ON(!have_he && band == NL80211_BAND_6GHZ)) return -EINVAL; have_band = true; } if (!have_band) { WARN_ON(1); return -EINVAL; } for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) { /* * Validate we have a policy (can be explicitly set to * VENDOR_CMD_RAW_DATA which is non-NULL) and also that * we have at least one of doit/dumpit. */ if (WARN_ON(!rdev->wiphy.vendor_commands[i].policy)) return -EINVAL; if (WARN_ON(!rdev->wiphy.vendor_commands[i].doit && !rdev->wiphy.vendor_commands[i].dumpit)) return -EINVAL; } #ifdef CONFIG_PM if (WARN_ON(rdev->wiphy.wowlan && rdev->wiphy.wowlan->n_patterns && (!rdev->wiphy.wowlan->pattern_min_len || rdev->wiphy.wowlan->pattern_min_len > rdev->wiphy.wowlan->pattern_max_len))) return -EINVAL; #endif if (!wiphy->max_num_akm_suites) wiphy->max_num_akm_suites = NL80211_MAX_NR_AKM_SUITES; else if (wiphy->max_num_akm_suites < NL80211_MAX_NR_AKM_SUITES || wiphy->max_num_akm_suites > CFG80211_MAX_NUM_AKM_SUITES) return -EINVAL; /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); rdev->wiphy.features |= NL80211_FEATURE_SCAN_FLUSH; rtnl_lock(); wiphy_lock(&rdev->wiphy); res = device_add(&rdev->wiphy.dev); if (res) { wiphy_unlock(&rdev->wiphy); rtnl_unlock(); return res; } list_add_rcu(&rdev->list, &cfg80211_rdev_list); cfg80211_rdev_list_generation++; /* add to debugfs */ rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy), ieee80211_debugfs_dir); cfg80211_debugfs_rdev_add(rdev); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); wiphy_unlock(&rdev->wiphy); /* set up regulatory info */ wiphy_regulatory_register(wiphy); if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { struct regulatory_request request; request.wiphy_idx = get_wiphy_idx(wiphy); request.initiator = NL80211_REGDOM_SET_BY_DRIVER; request.alpha2[0] = '9'; request.alpha2[1] = '9'; nl80211_send_reg_change_event(&request); } /* Check that nobody globally advertises any capabilities they do not * advertise on all possible interface types. 
*/ if (wiphy->extended_capabilities_len && wiphy->num_iftype_ext_capab && wiphy->iftype_ext_capab) { u8 supported_on_all, j; const struct wiphy_iftype_ext_capab *capab; capab = wiphy->iftype_ext_capab; for (j = 0; j < wiphy->extended_capabilities_len; j++) { if (capab[0].extended_capabilities_len > j) supported_on_all = capab[0].extended_capabilities[j]; else supported_on_all = 0x00; for (i = 1; i < wiphy->num_iftype_ext_capab; i++) { if (j >= capab[i].extended_capabilities_len) { supported_on_all = 0x00; break; } supported_on_all &= capab[i].extended_capabilities[j]; } if (WARN_ON(wiphy->extended_capabilities[j] & ~supported_on_all)) break; } } rdev->wiphy.registered = true; rtnl_unlock(); res = rfkill_register(rdev->wiphy.rfkill); if (res) { rfkill_destroy(rdev->wiphy.rfkill); rdev->wiphy.rfkill = NULL; wiphy_unregister(&rdev->wiphy); return res; } return 0; } EXPORT_SYMBOL(wiphy_register); void wiphy_rfkill_start_polling(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (!rdev->ops->rfkill_poll) return; rdev->rfkill_ops.poll = cfg80211_rfkill_poll; rfkill_resume_polling(wiphy->rfkill); } EXPORT_SYMBOL(wiphy_rfkill_start_polling); void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev, struct wiphy_work *end) { unsigned int runaway_limit = 100; unsigned long flags; lockdep_assert_held(&rdev->wiphy.mtx); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); while (!list_empty(&rdev->wiphy_work_list)) { struct wiphy_work *wk; wk = list_first_entry(&rdev->wiphy_work_list, struct wiphy_work, entry); list_del_init(&wk->entry); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); trace_wiphy_work_run(&rdev->wiphy, wk); wk->func(&rdev->wiphy, wk); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); if (wk == end) break; if (WARN_ON(--runaway_limit == 0)) INIT_LIST_HEAD(&rdev->wiphy_work_list); } spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); } void wiphy_unregister(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); wait_event(rdev->dev_wait, ({ int __count; wiphy_lock(&rdev->wiphy); __count = rdev->opencount; wiphy_unlock(&rdev->wiphy); __count == 0; })); if (rdev->wiphy.rfkill) rfkill_unregister(rdev->wiphy.rfkill); rtnl_lock(); wiphy_lock(&rdev->wiphy); nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); rdev->wiphy.registered = false; WARN_ON(!list_empty(&rdev->wiphy.wdev_list)); /* * First remove the hardware from everywhere, this makes * it impossible to find from userspace. 
*/ debugfs_remove_recursive(rdev->wiphy.debugfsdir); list_del_rcu(&rdev->list); synchronize_rcu(); /* * If this device got a regulatory hint tell core its * free to listen now to a new shiny device regulatory hint */ wiphy_regulatory_deregister(wiphy); cfg80211_rdev_list_generation++; device_del(&rdev->wiphy.dev); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) rdev_set_wakeup(rdev, false); #endif /* surely nothing is reachable now, clean up work */ cfg80211_process_wiphy_works(rdev, NULL); wiphy_unlock(&rdev->wiphy); rtnl_unlock(); /* this has nothing to do now but make sure it's gone */ cancel_work_sync(&rdev->wiphy_work); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); cancel_delayed_work_sync(&rdev->background_cac_done_wk); flush_work(&rdev->destroy_work); flush_work(&rdev->propagate_radar_detect_wk); flush_work(&rdev->propagate_cac_done_wk); flush_work(&rdev->mgmt_registrations_update_wk); flush_work(&rdev->background_cac_abort_wk); cfg80211_rdev_free_wowlan(rdev); cfg80211_free_coalesce(rdev->coalesce); rdev->coalesce = NULL; } EXPORT_SYMBOL(wiphy_unregister); void cfg80211_dev_free(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; unsigned long flags; spin_lock_irqsave(&rdev->wiphy_work_lock, flags); WARN_ON(!list_empty(&rdev->wiphy_work_list)); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); cancel_work_sync(&rdev->wiphy_work); rfkill_destroy(rdev->wiphy.rfkill); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(&reg->list); kfree(reg); } list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&rdev->wiphy, &scan->pub); mutex_destroy(&rdev->wiphy.mtx); /* * The 'regd' can only be non-NULL if we never finished * initializing the wiphy and thus never went through the * unregister path - e.g. in failure scenarios. Thus, it * cannot have been visible to anyone if non-NULL, so we * can just free it here. 
*/ kfree(rcu_dereference_raw(rdev->wiphy.regd)); kfree(rdev); } void wiphy_free(struct wiphy *wiphy) { put_device(&wiphy->dev); } EXPORT_SYMBOL(wiphy_free); void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked, enum rfkill_hard_block_reasons reason) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (rfkill_set_hw_state_reason(wiphy->rfkill, blocked, reason)) schedule_work(&rdev->rfkill_block); } EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason); static void _cfg80211_unregister_wdev(struct wireless_dev *wdev, bool unregister_netdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_cqm_config *cqm_config; unsigned int link_id; ASSERT_RTNL(); lockdep_assert_held(&rdev->wiphy.mtx); nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); wdev->registered = false; if (wdev->netdev) { sysfs_remove_link(&wdev->netdev->dev.kobj, "phy80211"); if (unregister_netdev) unregister_netdevice(wdev->netdev); } list_del_rcu(&wdev->list); synchronize_net(); rdev->devlist_generation++; cfg80211_mlme_purge_registrations(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: cfg80211_stop_p2p_device(rdev, wdev); break; case NL80211_IFTYPE_NAN: cfg80211_stop_nan(rdev, wdev); break; default: break; } #ifdef CONFIG_CFG80211_WEXT kfree_sensitive(wdev->wext.keys); wdev->wext.keys = NULL; #endif wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work); /* deleted from the list, so can't be found from nl80211 any more */ cqm_config = rcu_access_pointer(wdev->cqm_config); kfree_rcu(cqm_config, rcu_head); RCU_INIT_POINTER(wdev->cqm_config, NULL); /* * Ensure that all events have been processed and * freed. */ cfg80211_process_wdev_events(wdev); if (wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) { for (link_id = 0; link_id < ARRAY_SIZE(wdev->links); link_id++) { struct cfg80211_internal_bss *curbss; curbss = wdev->links[link_id].client.current_bss; if (WARN_ON(curbss)) { cfg80211_unhold_bss(curbss); cfg80211_put_bss(wdev->wiphy, &curbss->pub); wdev->links[link_id].client.current_bss = NULL; } } } wdev->connected = false; } void cfg80211_unregister_wdev(struct wireless_dev *wdev) { _cfg80211_unregister_wdev(wdev, true); } EXPORT_SYMBOL(cfg80211_unregister_wdev); static const struct device_type wiphy_type = { .name = "wlan", }; void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, int num) { lockdep_assert_held(&rdev->wiphy.mtx); rdev->num_running_ifaces += num; if (iftype == NL80211_IFTYPE_MONITOR) rdev->num_running_monitor_ifaces += num; } void cfg80211_leave(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct net_device *dev = wdev->netdev; struct cfg80211_sched_scan_request *pos, *tmp; lockdep_assert_held(&rdev->wiphy.mtx); cfg80211_pmsr_wdev_down(wdev); cfg80211_stop_background_radar_detection(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: cfg80211_leave_ibss(rdev, dev, true); break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, list) { if (dev == pos->dev) cfg80211_stop_sched_scan_req(rdev, pos, false); } #ifdef CONFIG_CFG80211_WEXT kfree(wdev->wext.ie); wdev->wext.ie = NULL; wdev->wext.ie_len = 0; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); break; case NL80211_IFTYPE_MESH_POINT: cfg80211_leave_mesh(rdev, dev); break; case NL80211_IFTYPE_AP: case 
NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, dev, -1, true); break; case NL80211_IFTYPE_OCB: cfg80211_leave_ocb(rdev, dev); break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: /* cannot happen, has no netdev */ break; case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: /* nothing to do */ break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_WDS: case NUM_NL80211_IFTYPES: /* invalid */ break; } } void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_event *ev; unsigned long flags; trace_cfg80211_stop_iface(wiphy, wdev); ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; ev->type = EVENT_STOPPED; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_stop_iface); void cfg80211_init_wdev(struct wireless_dev *wdev) { INIT_LIST_HEAD(&wdev->event_list); spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); INIT_LIST_HEAD(&wdev->pmsr_list); spin_lock_init(&wdev->pmsr_lock); INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_key = -1; wdev->wext.default_mgmt_key = -1; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work); if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) wdev->ps = true; else wdev->ps = false; /* allow mac80211 to determine the timeout */ wdev->ps_timeout = -1; wdev->radio_mask = BIT(wdev->wiphy->n_radio) - 1; if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) wdev->netdev->priv_flags |= IFF_DONT_BRIDGE; INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); } void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { ASSERT_RTNL(); lockdep_assert_held(&rdev->wiphy.mtx); /* * We get here also when the interface changes network namespaces, * as it's registered into the new one, but we don't want it to * change ID in that case. Checking if the ID is already assigned * works, because 0 isn't considered a valid ID and the memory is * 0-initialized. 
*/ if (!wdev->identifier) wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list); rdev->devlist_generation++; wdev->registered = true; if (wdev->netdev && sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj, "phy80211")) pr_err("failed to add phy80211 symlink to netdev!\n"); nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); } int cfg80211_register_netdevice(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; int ret; ASSERT_RTNL(); if (WARN_ON(!wdev)) return -EINVAL; rdev = wiphy_to_rdev(wdev->wiphy); lockdep_assert_held(&rdev->wiphy.mtx); /* we'll take care of this */ wdev->registered = true; wdev->registering = true; ret = register_netdevice(dev); if (ret) goto out; cfg80211_register_wdev(rdev, wdev); ret = 0; out: wdev->registering = false; if (ret) wdev->registered = false; return ret; } EXPORT_SYMBOL(cfg80211_register_netdevice); static int cfg80211_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; struct cfg80211_sched_scan_request *pos, *tmp; if (!wdev) return NOTIFY_DONE; rdev = wiphy_to_rdev(wdev->wiphy); WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); switch (state) { case NETDEV_POST_INIT: SET_NETDEV_DEVTYPE(dev, &wiphy_type); wdev->netdev = dev; /* can only change netns with wiphy */ dev->netns_immutable = true; cfg80211_init_wdev(wdev); break; case NETDEV_REGISTER: if (!wdev->registered) { guard(wiphy)(&rdev->wiphy); cfg80211_register_wdev(rdev, wdev); } break; case NETDEV_UNREGISTER: /* * It is possible to get NETDEV_UNREGISTER multiple times, * so check wdev->registered. */ if (wdev->registered && !wdev->registering) { guard(wiphy)(&rdev->wiphy); _cfg80211_unregister_wdev(wdev, false); } break; case NETDEV_GOING_DOWN: scoped_guard(wiphy, &rdev->wiphy) { cfg80211_leave(rdev, wdev); cfg80211_remove_links(wdev); } /* since we just did cfg80211_leave() nothing to do there */ cancel_work_sync(&wdev->disconnect_wk); cancel_work_sync(&wdev->pmsr_free_wk); break; case NETDEV_DOWN: wiphy_lock(&rdev->wiphy); cfg80211_update_iface_num(rdev, wdev->iftype, -1); if (rdev->scan_req && rdev->scan_req->wdev == wdev) { if (WARN_ON(!rdev->scan_req->notified && (!rdev->int_scan_req || !rdev->int_scan_req->notified))) rdev->scan_req->info.aborted = true; ___cfg80211_scan_done(rdev, false); } list_for_each_entry_safe(pos, tmp, &rdev->sched_scan_req_list, list) { if (WARN_ON(pos->dev == wdev->netdev)) cfg80211_stop_sched_scan_req(rdev, pos, false); } rdev->opencount--; wiphy_unlock(&rdev->wiphy); wake_up(&rdev->dev_wait); break; case NETDEV_UP: wiphy_lock(&rdev->wiphy); cfg80211_update_iface_num(rdev, wdev->iftype, 1); switch (wdev->iftype) { #ifdef CONFIG_CFG80211_WEXT case NL80211_IFTYPE_ADHOC: cfg80211_ibss_wext_join(rdev, wdev); break; case NL80211_IFTYPE_STATION: cfg80211_mgd_wext_connect(rdev, wdev); break; #endif #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: { /* backward compat code... 
*/ struct mesh_setup setup; memcpy(&setup, &default_mesh_setup, sizeof(setup)); /* back compat only needed for mesh_id */ setup.mesh_id = wdev->u.mesh.id; setup.mesh_id_len = wdev->u.mesh.id_up_len; if (wdev->u.mesh.id_up_len) __cfg80211_join_mesh(rdev, dev, &setup, &default_mesh_config); break; } #endif default: break; } rdev->opencount++; /* * Configure power management to the driver here so that its * correctly set also after interface type changes etc. */ if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && rdev->ops->set_power_mgmt && rdev_set_power_mgmt(rdev, dev, wdev->ps, wdev->ps_timeout)) { /* assume this means it's off */ wdev->ps = false; } wiphy_unlock(&rdev->wiphy); break; case NETDEV_PRE_UP: if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype, wdev->use_4addr, 0)) return notifier_from_errno(-EOPNOTSUPP); if (rfkill_blocked(rdev->wiphy.rfkill)) return notifier_from_errno(-ERFKILL); break; default: return NOTIFY_DONE; } wireless_nlevent_flush(); return NOTIFY_OK; } static struct notifier_block cfg80211_netdev_notifier = { .notifier_call = cfg80211_netdev_notifier_call, }; static void __net_exit cfg80211_pernet_exit(struct net *net) { struct cfg80211_registered_device *rdev; rtnl_lock(); for_each_rdev(rdev) { if (net_eq(wiphy_net(&rdev->wiphy), net)) WARN_ON(cfg80211_switch_netns(rdev, &init_net)); } rtnl_unlock(); } static struct pernet_operations cfg80211_pernet_ops = { .exit = cfg80211_pernet_exit, }; void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); unsigned long flags; trace_wiphy_work_queue(wiphy, work); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); if (list_empty(&work->entry)) list_add_tail(&work->entry, &rdev->wiphy_work_list); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); queue_work(system_unbound_wq, &rdev->wiphy_work); } EXPORT_SYMBOL_GPL(wiphy_work_queue); void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); unsigned long flags; lockdep_assert_held(&wiphy->mtx); trace_wiphy_work_cancel(wiphy, work); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); if (!list_empty(&work->entry)) list_del_init(&work->entry); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); } EXPORT_SYMBOL_GPL(wiphy_work_cancel); void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); unsigned long flags; bool run; trace_wiphy_work_flush(wiphy, work); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); run = !work || !list_empty(&work->entry); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); if (run) cfg80211_process_wiphy_works(rdev, work); } EXPORT_SYMBOL_GPL(wiphy_work_flush); void wiphy_delayed_work_timer(struct timer_list *t) { struct wiphy_delayed_work *dwork = timer_container_of(dwork, t, timer); wiphy_work_queue(dwork->wiphy, &dwork->work); } EXPORT_SYMBOL(wiphy_delayed_work_timer); void wiphy_delayed_work_queue(struct wiphy *wiphy, struct wiphy_delayed_work *dwork, unsigned long delay) { trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay); if (!delay) { timer_delete(&dwork->timer); wiphy_work_queue(wiphy, &dwork->work); return; } dwork->wiphy = wiphy; mod_timer(&dwork->timer, jiffies + delay); } EXPORT_SYMBOL_GPL(wiphy_delayed_work_queue); void wiphy_delayed_work_cancel(struct wiphy *wiphy, struct wiphy_delayed_work *dwork) { lockdep_assert_held(&wiphy->mtx); 
timer_delete_sync(&dwork->timer); wiphy_work_cancel(wiphy, &dwork->work); } EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel); void wiphy_delayed_work_flush(struct wiphy *wiphy, struct wiphy_delayed_work *dwork) { lockdep_assert_held(&wiphy->mtx); timer_delete_sync(&dwork->timer); wiphy_work_flush(wiphy, &dwork->work); } EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush); bool wiphy_delayed_work_pending(struct wiphy *wiphy, struct wiphy_delayed_work *dwork) { return timer_pending(&dwork->timer); } EXPORT_SYMBOL_GPL(wiphy_delayed_work_pending); static int __init cfg80211_init(void) { int err; err = register_pernet_device(&cfg80211_pernet_ops); if (err) goto out_fail_pernet; err = wiphy_sysfs_init(); if (err) goto out_fail_sysfs; err = register_netdevice_notifier(&cfg80211_netdev_notifier); if (err) goto out_fail_notifier; err = nl80211_init(); if (err) goto out_fail_nl80211; ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); err = regulatory_init(); if (err) goto out_fail_reg; cfg80211_wq = alloc_ordered_workqueue("cfg80211", WQ_MEM_RECLAIM); if (!cfg80211_wq) { err = -ENOMEM; goto out_fail_wq; } return 0; out_fail_wq: regulatory_exit(); out_fail_reg: debugfs_remove(ieee80211_debugfs_dir); nl80211_exit(); out_fail_nl80211: unregister_netdevice_notifier(&cfg80211_netdev_notifier); out_fail_notifier: wiphy_sysfs_exit(); out_fail_sysfs: unregister_pernet_device(&cfg80211_pernet_ops); out_fail_pernet: return err; } fs_initcall(cfg80211_init); static void __exit cfg80211_exit(void) { debugfs_remove(ieee80211_debugfs_dir); nl80211_exit(); unregister_netdevice_notifier(&cfg80211_netdev_notifier); wiphy_sysfs_exit(); regulatory_exit(); unregister_pernet_device(&cfg80211_pernet_ops); destroy_workqueue(cfg80211_wq); } module_exit(cfg80211_exit); |
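As a usage note for the wiphy lifecycle exported above (wiphy_new_nm()/wiphy_new(), wiphy_register(), wiphy_unregister(), wiphy_free()), here is a minimal, hedged sketch of a driver's probe/remove path; my_priv, my_cfg80211_ops and my_wiphy_setup() are illustrative assumptions, not part of core.c.

/* Illustrative driver-side sketch only; ops callbacks and band setup omitted. */
#include <net/cfg80211.h>

struct my_priv {
	struct wiphy *wiphy;
};

static const struct cfg80211_ops my_cfg80211_ops = {
	/* scan/connect/... callbacks would be filled in by a real driver */
};

static struct wiphy *my_wiphy_setup(struct device *dev)
{
	struct wiphy *wiphy;
	struct my_priv *priv;

	/* wiphy_new() is wiphy_new_nm() with no requested name (default phy%d) */
	wiphy = wiphy_new(&my_cfg80211_ops, sizeof(*priv));
	if (!wiphy)
		return NULL;

	priv = wiphy_priv(wiphy);
	priv->wiphy = wiphy;

	set_wiphy_dev(wiphy, dev);
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	/* wiphy->bands[] must be populated before registration succeeds */

	if (wiphy_register(wiphy)) {
		wiphy_free(wiphy);
		return NULL;
	}
	return wiphy;
}

static void my_wiphy_teardown(struct wiphy *wiphy)
{
	wiphy_unregister(wiphy);	/* waits for opencount to drop to zero */
	wiphy_free(wiphy);		/* final put_device() on the wiphy */
}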
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux driver for TerraTec DMX 6Fire USB * * Firmware loader * * Author: Torsten Schenk <torsten.schenk@zoho.com> * Created: Jan 01, 2011 * Copyright: (C) Torsten Schenk */ #include <linux/firmware.h> #include <linux/module.h> #include <linux/bitrev.h> #include <linux/kernel.h> #include "firmware.h" #include "chip.h" MODULE_FIRMWARE("6fire/dmx6firel2.ihx"); MODULE_FIRMWARE("6fire/dmx6fireap.ihx"); MODULE_FIRMWARE("6fire/dmx6firecf.bin"); enum { FPGA_BUFSIZE = 512, FPGA_EP = 2 }; /* * wMaxPacketSize of pcm endpoints. * keep synced with rates_in_packet_size and rates_out_packet_size in pcm.c * fpp: frames per isopacket * * CAUTION: keep sizeof <= buffer[] in usb6fire_fw_init */ static const u8 ep_w_max_packet_size[] = { 0xe4, 0x00, 0xe4, 0x00, /* alt 1: 228 EP2 and EP6 (7 fpp) */ 0xa4, 0x01, 0xa4, 0x01, /* alt 2: 420 EP2 and EP6 (13 fpp)*/ 0x94, 0x01, 0x5c, 0x02 /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */ }; static const u8 known_fw_versions[][2] = { { 0x03, 0x01 } }; struct ihex_record { u16 address; u8 len; u8 data[256]; char error; /* true if an error occurred parsing this record */ u8 max_len; /* maximum record length in whole ihex */ /* private */ const char *txt_data; unsigned int txt_length; unsigned int txt_offset; /* current position in txt_data */ }; static u8 usb6fire_fw_ihex_hex(const u8 *data, u8 *crc) { u8 val = 0; int hval; hval = hex_to_bin(data[0]); if (hval >= 0) val |= (hval << 4); hval = hex_to_bin(data[1]); if (hval >= 0) val |= hval; *crc += val; return val; } /* * returns true if record is available, false otherwise. * iff an error occurred, false will be returned and record->error will be true. 
*/ static bool usb6fire_fw_ihex_next_record(struct ihex_record *record) { u8 crc = 0; u8 type; int i; record->error = false; /* find begin of record (marked by a colon) */ while (record->txt_offset < record->txt_length && record->txt_data[record->txt_offset] != ':') record->txt_offset++; if (record->txt_offset == record->txt_length) return false; /* number of characters needed for len, addr and type entries */ record->txt_offset++; if (record->txt_offset + 8 > record->txt_length) { record->error = true; return false; } record->len = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; record->address = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc) << 8; record->txt_offset += 2; record->address |= usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; type = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; /* number of characters needed for data and crc entries */ if (record->txt_offset + 2 * (record->len + 1) > record->txt_length) { record->error = true; return false; } for (i = 0; i < record->len; i++) { record->data[i] = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; } usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); if (crc) { record->error = true; return false; } if (type == 1 || !record->len) /* eof */ return false; else if (type == 0) return true; else { record->error = true; return false; } } static int usb6fire_fw_ihex_init(const struct firmware *fw, struct ihex_record *record) { record->txt_data = fw->data; record->txt_length = fw->size; record->txt_offset = 0; record->max_len = 0; /* read all records, if loop ends, record->error indicates, * whether ihex is valid. 
*/ while (usb6fire_fw_ihex_next_record(record)) record->max_len = max(record->len, record->max_len); if (record->error) return -EINVAL; record->txt_offset = 0; return 0; } static int usb6fire_fw_ezusb_write(struct usb_device *device, int type, int value, char *data, int len) { return usb_control_msg_send(device, 0, type, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, data, len, 1000, GFP_KERNEL); } static int usb6fire_fw_ezusb_read(struct usb_device *device, int type, int value, char *data, int len) { return usb_control_msg_recv(device, 0, type, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, data, len, 1000, GFP_KERNEL); } static int usb6fire_fw_fpga_write(struct usb_device *device, char *data, int len) { int actual_len; int ret; ret = usb_bulk_msg(device, usb_sndbulkpipe(device, FPGA_EP), data, len, &actual_len, 1000); if (ret < 0) return ret; else if (actual_len != len) return -EIO; return 0; } static int usb6fire_fw_ezusb_upload( struct usb_interface *intf, const char *fwname, unsigned int postaddr, u8 *postdata, unsigned int postlen) { int ret; u8 data; struct usb_device *device = interface_to_usbdev(intf); const struct firmware *fw = NULL; struct ihex_record *rec = kmalloc(sizeof(struct ihex_record), GFP_KERNEL); if (!rec) return -ENOMEM; ret = request_firmware(&fw, fwname, &device->dev); if (ret < 0) { kfree(rec); dev_err(&intf->dev, "error requesting ezusb firmware %s.\n", fwname); return ret; } ret = usb6fire_fw_ihex_init(fw, rec); if (ret < 0) { kfree(rec); release_firmware(fw); dev_err(&intf->dev, "error validating ezusb firmware %s.\n", fwname); return ret; } /* upload firmware image */ data = 0x01; /* stop ezusb cpu */ ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1); if (ret) { kfree(rec); release_firmware(fw); dev_err(&intf->dev, "unable to upload ezusb firmware %s: begin message.\n", fwname); return ret; } while (usb6fire_fw_ihex_next_record(rec)) { /* write firmware */ ret = usb6fire_fw_ezusb_write(device, 0xa0, rec->address, rec->data, rec->len); if (ret) { kfree(rec); release_firmware(fw); dev_err(&intf->dev, "unable to upload ezusb firmware %s: data urb.\n", fwname); return ret; } } release_firmware(fw); kfree(rec); if (postdata) { /* write data after firmware has been uploaded */ ret = usb6fire_fw_ezusb_write(device, 0xa0, postaddr, postdata, postlen); if (ret) { dev_err(&intf->dev, "unable to upload ezusb firmware %s: post urb.\n", fwname); return ret; } } data = 0x00; /* resume ezusb cpu */ ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1); if (ret) { dev_err(&intf->dev, "unable to upload ezusb firmware %s: end message.\n", fwname); return ret; } return 0; } static int usb6fire_fw_fpga_upload( struct usb_interface *intf, const char *fwname) { int ret; int i; struct usb_device *device = interface_to_usbdev(intf); u8 *buffer = kmalloc(FPGA_BUFSIZE, GFP_KERNEL); const char *c; const char *end; const struct firmware *fw; if (!buffer) return -ENOMEM; ret = request_firmware(&fw, fwname, &device->dev); if (ret < 0) { dev_err(&intf->dev, "unable to get fpga firmware %s.\n", fwname); kfree(buffer); return -EIO; } c = fw->data; end = fw->data + fw->size; ret = usb6fire_fw_ezusb_write(device, 8, 0, NULL, 0); if (ret) { kfree(buffer); release_firmware(fw); dev_err(&intf->dev, "unable to upload fpga firmware: begin urb.\n"); return ret; } while (c != end) { for (i = 0; c != end && i < FPGA_BUFSIZE; i++, c++) buffer[i] = bitrev8((u8)*c); ret = usb6fire_fw_fpga_write(device, buffer, i); if (ret < 0) { release_firmware(fw); 
kfree(buffer); dev_err(&intf->dev, "unable to upload fpga firmware: fw urb.\n"); return ret; } } release_firmware(fw); kfree(buffer); ret = usb6fire_fw_ezusb_write(device, 9, 0, NULL, 0); if (ret) { dev_err(&intf->dev, "unable to upload fpga firmware: end urb.\n"); return ret; } return 0; } /* check, if the firmware version the devices has currently loaded * is known by this driver. 'version' needs to have 4 bytes version * info data. */ static int usb6fire_fw_check(struct usb_interface *intf, const u8 *version) { int i; for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++) if (!memcmp(version, known_fw_versions + i, 2)) return 0; dev_err(&intf->dev, "invalid firmware version in device: %4ph. " "please reconnect to power. if this failure " "still happens, check your firmware installation.", version); return -EINVAL; } int usb6fire_fw_init(struct usb_interface *intf) { int i; int ret; struct usb_device *device = interface_to_usbdev(intf); /* buffer: 8 receiving bytes from device and * sizeof(EP_W_MAX_PACKET_SIZE) bytes for non-const copy */ u8 buffer[12]; ret = usb6fire_fw_ezusb_read(device, 1, 0, buffer, 8); if (ret) { dev_err(&intf->dev, "unable to receive device firmware state.\n"); return ret; } if (buffer[0] != 0xeb || buffer[1] != 0xaa || buffer[2] != 0x55) { dev_err(&intf->dev, "unknown device firmware state received from device:"); for (i = 0; i < 8; i++) printk(KERN_CONT "%02x ", buffer[i]); printk(KERN_CONT "\n"); return -EIO; } /* do we need fpga loader ezusb firmware? */ if (buffer[3] == 0x01) { ret = usb6fire_fw_ezusb_upload(intf, "6fire/dmx6firel2.ihx", 0, NULL, 0); if (ret < 0) return ret; return FW_NOT_READY; } /* do we need fpga firmware and application ezusb firmware? */ else if (buffer[3] == 0x02) { ret = usb6fire_fw_check(intf, buffer + 4); if (ret < 0) return ret; ret = usb6fire_fw_fpga_upload(intf, "6fire/dmx6firecf.bin"); if (ret < 0) return ret; memcpy(buffer, ep_w_max_packet_size, sizeof(ep_w_max_packet_size)); ret = usb6fire_fw_ezusb_upload(intf, "6fire/dmx6fireap.ihx", 0x0003, buffer, sizeof(ep_w_max_packet_size)); if (ret < 0) return ret; return FW_NOT_READY; } /* all fw loaded? */ else if (buffer[3] == 0x03) return usb6fire_fw_check(intf, buffer + 4); /* unknown data? */ else { dev_err(&intf->dev, "unknown device firmware state received from device: "); for (i = 0; i < 8; i++) printk(KERN_CONT "%02x ", buffer[i]); printk(KERN_CONT "\n"); return -EIO; } return 0; } |
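/*
 * For reference, usb6fire_fw_ihex_next_record() above parses standard Intel HEX
 * text records of the form ":LLAAAATT<data...>CC", where LL is the data byte
 * count, AAAA the load address, TT the record type (0x00 data, 0x01 end of file)
 * and CC a checksum chosen so that all bytes of the record sum to zero modulo
 * 256; the driver accumulates that sum in its local 'crc' variable and rejects
 * the record if it is non-zero. The sketch below is an illustrative,
 * self-contained restatement of that checksum rule, not driver code; the helper
 * name ihex_checksum_ok() is made up for this example and u8 is assumed from
 * <linux/types.h>.
 */
static bool ihex_checksum_ok(const u8 *bytes, size_t n)
{
	u8 sum = 0;
	size_t i;

	/* Sum every byte of the record, including the trailing checksum byte. */
	for (i = 0; i < n; i++)
		sum += bytes[i];

	/* A valid Intel HEX record sums to zero modulo 256. */
	return sum == 0;
}
/*
 * Example: the record ":0300300002337A1E" carries len 0x03, address 0x0030,
 * type 0x00, data bytes {0x02, 0x33, 0x7A} and checksum 0x1E, and
 * 0x03 + 0x00 + 0x30 + 0x00 + 0x02 + 0x33 + 0x7A + 0x1E == 0x100, i.e. 0 modulo 256.
 */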
1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 | // SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 generic device routines. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/log2.h> #include <linux/of.h> #include <linux/of_net.h> #include "rt2x00.h" #include "rt2x00lib.h" /* * Utility functions. */ u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif) { /* * When in STA mode, bssidx is always 0 otherwise local_address[5] * contains the bss number, see BSS_ID_MASK comments for details. */ if (rt2x00dev->intf_sta_count) return 0; return vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1); } EXPORT_SYMBOL_GPL(rt2x00lib_get_bssidx); /* * Radio control handlers. */ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) { int status; /* * Don't enable the radio twice. * And check if the hardware button has been disabled. */ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return 0; /* * Initialize all data queues. */ rt2x00queue_init_queues(rt2x00dev); /* * Enable radio. */ status = rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_ON); if (status) return status; rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_ON); rt2x00leds_led_radio(rt2x00dev, true); rt2x00led_led_activity(rt2x00dev, true); set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags); /* * Enable queues. */ rt2x00queue_start_queues(rt2x00dev); rt2x00link_start_tuner(rt2x00dev); /* * Start watchdog monitoring. */ rt2x00link_start_watchdog(rt2x00dev); return 0; } void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) { if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* * Stop watchdog monitoring. */ rt2x00link_stop_watchdog(rt2x00dev); /* * Stop all queues */ rt2x00link_stop_tuner(rt2x00dev); rt2x00queue_stop_queues(rt2x00dev); rt2x00queue_flush_queues(rt2x00dev, true); rt2x00queue_stop_queue(rt2x00dev->bcn); /* * Disable radio. */ rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF); rt2x00led_led_activity(rt2x00dev, false); rt2x00leds_led_radio(rt2x00dev, false); } static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = data; struct rt2x00_intf *intf = vif_to_intf(vif); /* * It is possible the radio was disabled while the work had been * scheduled. If that happens we should return here immediately, * note that in the spinlock protected area above the delayed_flags * have been cleared correctly. */ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags)) { mutex_lock(&intf->beacon_skb_mutex); rt2x00queue_update_beacon(rt2x00dev, vif); mutex_unlock(&intf->beacon_skb_mutex); } } static void rt2x00lib_intf_scheduled(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, intf_work); /* * Iterate over each interface and perform the * requested configurations. 
*/ ieee80211_iterate_active_interfaces(rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_intf_scheduled_iter, rt2x00dev); } static void rt2x00lib_autowakeup(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, autowakeup_work.work); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) rt2x00_err(rt2x00dev, "Device failed to wakeup\n"); clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); } /* * Interrupt context handlers. */ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ieee80211_tx_control control = {}; struct rt2x00_dev *rt2x00dev = data; struct sk_buff *skb; /* * Only AP mode interfaces do broad- and multicast buffering */ if (vif->type != NL80211_IFTYPE_AP) return; /* * Send out buffered broad- and multicast frames */ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); while (skb) { rt2x00mac_tx(rt2x00dev->hw, &control, skb); skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); } } static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = data; if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && vif->type != NL80211_IFTYPE_MESH_POINT) return; /* * Update the beacon without locking. This is safe on PCI devices * as they only update the beacon periodically here. This should * never be called for USB devices. */ WARN_ON(rt2x00_is_usb(rt2x00dev)); rt2x00queue_update_beacon(rt2x00dev, vif); } void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) { if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* send buffered bc/mc frames out for every bssid */ ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_bc_buffer_iter, rt2x00dev); /* * Devices with pre tbtt interrupt don't need to update the beacon * here as they will fetch the next beacon directly prior to * transmission. */ if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev)) return; /* fetch next beacon */ ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_beaconupdate_iter, rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev) { if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* fetch next beacon */ ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_beaconupdate_iter, rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); void rt2x00lib_dmastart(struct queue_entry *entry) { set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX); } EXPORT_SYMBOL_GPL(rt2x00lib_dmastart); void rt2x00lib_dmadone(struct queue_entry *entry) { set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags); clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE); } EXPORT_SYMBOL_GPL(rt2x00lib_dmadone); static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_bar *bar = (void *) entry->skb->data; struct rt2x00_bar_list_entry *bar_entry; int ret; if (likely(!ieee80211_is_back_req(bar->frame_control))) return 0; /* * Unlike all other frames, the status report for BARs does * not directly come from the hardware as it is incapable of * matching a BA to a previously send BAR. 
The hardware will * report all BARs as if they weren't acked at all. * * Instead the RX-path will scan for incoming BAs and set the * block_acked flag if it sees one that was likely caused by * a BAR from us. * * Remove remaining BARs here and return their status for * TX done processing. */ ret = 0; rcu_read_lock(); list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) { if (bar_entry->entry != entry) continue; spin_lock_bh(&rt2x00dev->bar_list_lock); /* Return whether this BAR was blockacked or not */ ret = bar_entry->block_acked; /* Remove the BAR from our checklist */ list_del_rcu(&bar_entry->list); spin_unlock_bh(&rt2x00dev->bar_list_lock); kfree_rcu(bar_entry, head); break; } rcu_read_unlock(); return ret; } static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev, struct ieee80211_tx_info *tx_info, struct skb_frame_desc *skbdesc, struct txdone_entry_desc *txdesc, bool success) { u8 rate_idx, rate_flags, retry_rates; int i; rate_idx = skbdesc->tx_rate_idx; rate_flags = skbdesc->tx_rate_flags; retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ? (txdesc->retry + 1) : 1; /* * Initialize TX status */ memset(&tx_info->status, 0, sizeof(tx_info->status)); tx_info->status.ack_signal = 0; /* * Frame was send with retries, hardware tried * different rates to send out the frame, at each * retry it lowered the rate 1 step except when the * lowest rate was used. */ for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) { tx_info->status.rates[i].idx = rate_idx - i; tx_info->status.rates[i].flags = rate_flags; if (rate_idx - i == 0) { /* * The lowest rate (index 0) was used until the * number of max retries was reached. */ tx_info->status.rates[i].count = retry_rates - i; i++; break; } tx_info->status.rates[i].count = 1; } if (i < (IEEE80211_TX_MAX_RATES - 1)) tx_info->status.rates[i].idx = -1; /* terminate */ if (test_bit(TXDONE_NO_ACK_REQ, &txdesc->flags)) tx_info->flags |= IEEE80211_TX_CTL_NO_ACK; if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { if (success) tx_info->flags |= IEEE80211_TX_STAT_ACK; else rt2x00dev->low_level_stats.dot11ACKFailureCount++; } /* * Every single frame has it's own tx status, hence report * every frame as ampdu of size 1. * * TODO: if we can find out how many frames were aggregated * by the hw we could provide the real ampdu_len to mac80211 * which would allow the rc algorithm to better decide on * which rates are suitable. */ if (test_bit(TXDONE_AMPDU, &txdesc->flags) || tx_info->flags & IEEE80211_TX_CTL_AMPDU) { tx_info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; tx_info->status.ampdu_len = 1; tx_info->status.ampdu_ack_len = success ? 1 : 0; } if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { if (success) rt2x00dev->low_level_stats.dot11RTSSuccessCount++; else rt2x00dev->low_level_stats.dot11RTSFailureCount++; } } static void rt2x00lib_clear_entry(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry) { /* * Make this entry available for reuse. */ entry->skb = NULL; entry->flags = 0; rt2x00dev->ops->lib->clear_entry(entry); rt2x00queue_index_inc(entry, Q_INDEX_DONE); /* * If the data queue was below the threshold before the txdone * handler we must make sure the packet queue in the mac80211 stack * is reenabled when the txdone handler has finished. This has to be * serialized with rt2x00mac_tx(), otherwise we can wake up queue * before it was stopped. 
*/ spin_lock_bh(&entry->queue->tx_lock); if (!rt2x00queue_threshold(entry->queue)) rt2x00queue_unpause_queue(entry->queue); spin_unlock_bh(&entry->queue->tx_lock); } void rt2x00lib_txdone_nomatch(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct ieee80211_tx_info txinfo = {}; bool success; /* * Unmap the skb. */ rt2x00queue_unmap_skb(entry); /* * Signal that the TX descriptor is no longer in the skb. */ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; /* * Send frame to debugfs immediately, after this call is completed * we are going to overwrite the skb->cb array. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); /* * Determine if the frame has been successfully transmitted and * remove BARs from our check list while checking for their * TX status. */ success = rt2x00lib_txdone_bar_status(entry) || test_bit(TXDONE_SUCCESS, &txdesc->flags); if (!test_bit(TXDONE_UNKNOWN, &txdesc->flags)) { /* * Update TX statistics. */ rt2x00dev->link.qual.tx_success += success; rt2x00dev->link.qual.tx_failed += !success; rt2x00lib_fill_tx_status(rt2x00dev, &txinfo, skbdesc, txdesc, success); ieee80211_tx_status_noskb(rt2x00dev->hw, skbdesc->sta, &txinfo); } dev_kfree_skb_any(entry->skb); rt2x00lib_clear_entry(rt2x00dev, entry); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone_nomatch); void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); u8 skbdesc_flags = skbdesc->flags; unsigned int header_length; bool success; /* * Unmap the skb. */ rt2x00queue_unmap_skb(entry); /* * Remove the extra tx headroom from the skb. */ skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); /* * Signal that the TX descriptor is no longer in the skb. */ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; /* * Determine the length of 802.11 header. */ header_length = ieee80211_get_hdrlen_from_skb(entry->skb); /* * Remove L2 padding which was added during */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD)) rt2x00queue_remove_l2pad(entry->skb, header_length); /* * If the IV/EIV data was stripped from the frame before it was * passed to the hardware, we should now reinsert it again because * mac80211 will expect the same data to be present it the * frame as it was passed to us. */ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) rt2x00crypto_tx_insert_iv(entry->skb, header_length); /* * Send frame to debugfs immediately, after this call is completed * we are going to overwrite the skb->cb array. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); /* * Determine if the frame has been successfully transmitted and * remove BARs from our check list while checking for their * TX status. */ success = rt2x00lib_txdone_bar_status(entry) || test_bit(TXDONE_SUCCESS, &txdesc->flags) || test_bit(TXDONE_UNKNOWN, &txdesc->flags); /* * Update TX statistics. */ rt2x00dev->link.qual.tx_success += success; rt2x00dev->link.qual.tx_failed += !success; rt2x00lib_fill_tx_status(rt2x00dev, tx_info, skbdesc, txdesc, success); /* * Only send the status report to mac80211 when it's a frame * that originated in mac80211. If this was a extra frame coming * through a mac80211 library call (RTS/CTS) then we should not * send the status report back. 
*/ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT)) ieee80211_tx_status_skb(rt2x00dev->hw, entry->skb); else ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); } else { dev_kfree_skb_any(entry->skb); } rt2x00lib_clear_entry(rt2x00dev, entry); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone); void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status) { struct txdone_entry_desc txdesc; txdesc.flags = 0; __set_bit(status, &txdesc.flags); txdesc.retry = 0; rt2x00lib_txdone(entry, &txdesc); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo); static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie) { struct ieee80211_mgmt *mgmt = (void *)data; u8 *pos, *end; pos = (u8 *)mgmt->u.beacon.variable; end = data + len; while (pos < end) { if (pos + 2 + pos[1] > end) return NULL; if (pos[0] == ie) return pos; pos += 2 + pos[1]; } return NULL; } static void rt2x00lib_sleep(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, sleep_work); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; /* * Check again is powersaving is enabled, to prevent races from delayed * work execution. */ if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, IEEE80211_CONF_CHANGE_PS); } static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) { struct rt2x00_bar_list_entry *entry; struct ieee80211_bar *ba = (void *)skb->data; if (likely(!ieee80211_is_back(ba->frame_control))) return; if (rxdesc->size < sizeof(*ba) + FCS_LEN) return; rcu_read_lock(); list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) { if (ba->start_seq_num != entry->start_seq_num) continue; #define TID_CHECK(a, b) ( \ ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \ ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \ if (!TID_CHECK(ba->control, entry->control)) continue; #undef TID_CHECK if (!ether_addr_equal_64bits(ba->ra, entry->ta)) continue; if (!ether_addr_equal_64bits(ba->ta, entry->ra)) continue; /* Mark BAR since we received the according BA */ spin_lock_bh(&rt2x00dev->bar_list_lock); entry->block_acked = 1; spin_unlock_bh(&rt2x00dev->bar_list_lock); break; } rcu_read_unlock(); } static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) { struct ieee80211_hdr *hdr = (void *) skb->data; struct ieee80211_tim_ie *tim_ie; u8 *tim; u8 tim_len; bool cam; /* If this is not a beacon, or if mac80211 has no powersaving * configured, or if the device is already in powersaving mode * we can exit now. */ if (likely(!ieee80211_is_beacon(hdr->frame_control) || !(rt2x00dev->hw->conf.flags & IEEE80211_CONF_PS))) return; /* min. beacon length + FCS_LEN */ if (skb->len <= 40 + FCS_LEN) return; /* and only beacons from the associated BSSID, please */ if (!(rxdesc->dev_flags & RXDONE_MY_BSS) || !rt2x00dev->aid) return; rt2x00dev->last_beacon = jiffies; tim = rt2x00lib_find_ie(skb->data, skb->len - FCS_LEN, WLAN_EID_TIM); if (!tim) return; if (tim[1] < sizeof(*tim_ie)) return; tim_len = tim[1]; tim_ie = (struct ieee80211_tim_ie *) &tim[2]; /* Check whenever the PHY can be turned off again. */ /* 1. What about buffered unicast traffic for our AID? */ cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid); /* 2. Maybe the AP wants to send multicast/broadcast data? 
*/ cam |= (tim_ie->bitmap_ctrl & 0x01); if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work); } static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, struct rxdone_entry_desc *rxdesc) { struct ieee80211_supported_band *sband; const struct rt2x00_rate *rate; unsigned int i; int signal = rxdesc->signal; int type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK); switch (rxdesc->rate_mode) { case RATE_MODE_CCK: case RATE_MODE_OFDM: /* * For non-HT rates the MCS value needs to contain the * actually used rate modulation (CCK or OFDM). */ if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS) signal = RATE_MCS(rxdesc->rate_mode, signal); sband = &rt2x00dev->bands[rt2x00dev->curr_band]; for (i = 0; i < sband->n_bitrates; i++) { rate = rt2x00_get_rate(sband->bitrates[i].hw_value); if (((type == RXDONE_SIGNAL_PLCP) && (rate->plcp == signal)) || ((type == RXDONE_SIGNAL_BITRATE) && (rate->bitrate == signal)) || ((type == RXDONE_SIGNAL_MCS) && (rate->mcs == signal))) { return i; } } break; case RATE_MODE_HT_MIX: case RATE_MODE_HT_GREENFIELD: if (signal >= 0 && signal <= 76) return signal; break; default: break; } rt2x00_warn(rt2x00dev, "Frame received with unrecognized signal, mode=0x%.4x, signal=0x%.4x, type=%d\n", rxdesc->rate_mode, signal, type); return 0; } void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct rxdone_entry_desc rxdesc; struct sk_buff *skb; struct ieee80211_rx_status *rx_status; unsigned int header_length; int rate_idx; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) goto submit_entry; if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) goto submit_entry; /* * Allocate a new sk_buffer. If no new buffer available, drop the * received frame and reuse the existing buffer. */ skb = rt2x00queue_alloc_rxskb(entry, gfp); if (!skb) goto submit_entry; /* * Unmap the skb. */ rt2x00queue_unmap_skb(entry); /* * Extract the RXD details. */ memset(&rxdesc, 0, sizeof(rxdesc)); rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); /* * Check for valid size in case we get corrupted descriptor from * hardware. */ if (unlikely(rxdesc.size == 0 || rxdesc.size > entry->queue->data_size)) { rt2x00_err(rt2x00dev, "Wrong frame size %d max %d\n", rxdesc.size, entry->queue->data_size); dev_kfree_skb(entry->skb); goto renew_skb; } /* * The data behind the ieee80211 header must be * aligned on a 4 byte boundary. */ header_length = ieee80211_get_hdrlen_from_skb(entry->skb); /* * Hardware might have stripped the IV/EIV/ICV data, * in that case it is possible that the data was * provided separately (through hardware descriptor) * in which case we should reinsert the data into the frame. */ if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) && (rxdesc.flags & RX_FLAG_IV_STRIPPED)) rt2x00crypto_rx_insert_iv(entry->skb, header_length, &rxdesc); else if (header_length && (rxdesc.size > header_length) && (rxdesc.dev_flags & RXDONE_L2PAD)) rt2x00queue_remove_l2pad(entry->skb, header_length); /* Trim buffer to correct size */ skb_trim(entry->skb, rxdesc.size); /* * Translate the signal to the correct bitrate index. */ rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc); if (rxdesc.rate_mode == RATE_MODE_HT_MIX || rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD) rxdesc.encoding = RX_ENC_HT; /* * Check if this is a beacon, and more frames have been * buffered while we were in powersaving mode. 
*/ rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc); /* * Check for incoming BlockAcks to match to the BlockAckReqs * we've send out. */ rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc); /* * Update extra components */ rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); rt2x00debug_update_crypto(rt2x00dev, &rxdesc); rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry); /* * Initialize RX status information, and send frame * to mac80211. */ rx_status = IEEE80211_SKB_RXCB(entry->skb); /* Ensure that all fields of rx_status are initialized * properly. The skb->cb array was used for driver * specific informations, so rx_status might contain * garbage. */ memset(rx_status, 0, sizeof(*rx_status)); rx_status->mactime = rxdesc.timestamp; rx_status->band = rt2x00dev->curr_band; rx_status->freq = rt2x00dev->curr_freq; rx_status->rate_idx = rate_idx; rx_status->signal = rxdesc.rssi; rx_status->flag = rxdesc.flags; rx_status->enc_flags = rxdesc.enc_flags; rx_status->encoding = rxdesc.encoding; rx_status->bw = rxdesc.bw; rx_status->antenna = rt2x00dev->link.ant.active.rx; ieee80211_rx_ni(rt2x00dev->hw, entry->skb); renew_skb: /* * Replace the skb with the freshly allocated one. */ entry->skb = skb; submit_entry: entry->flags = 0; rt2x00queue_index_inc(entry, Q_INDEX_DONE); if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2x00dev->ops->lib->clear_entry(entry); } EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); /* * Driver initialization handlers. */ const struct rt2x00_rate rt2x00_supported_rates[12] = { { .flags = DEV_RATE_CCK, .bitrate = 10, .ratemask = BIT(0), .plcp = 0x00, .mcs = RATE_MCS(RATE_MODE_CCK, 0), }, { .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, .bitrate = 20, .ratemask = BIT(1), .plcp = 0x01, .mcs = RATE_MCS(RATE_MODE_CCK, 1), }, { .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, .bitrate = 55, .ratemask = BIT(2), .plcp = 0x02, .mcs = RATE_MCS(RATE_MODE_CCK, 2), }, { .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, .bitrate = 110, .ratemask = BIT(3), .plcp = 0x03, .mcs = RATE_MCS(RATE_MODE_CCK, 3), }, { .flags = DEV_RATE_OFDM, .bitrate = 60, .ratemask = BIT(4), .plcp = 0x0b, .mcs = RATE_MCS(RATE_MODE_OFDM, 0), }, { .flags = DEV_RATE_OFDM, .bitrate = 90, .ratemask = BIT(5), .plcp = 0x0f, .mcs = RATE_MCS(RATE_MODE_OFDM, 1), }, { .flags = DEV_RATE_OFDM, .bitrate = 120, .ratemask = BIT(6), .plcp = 0x0a, .mcs = RATE_MCS(RATE_MODE_OFDM, 2), }, { .flags = DEV_RATE_OFDM, .bitrate = 180, .ratemask = BIT(7), .plcp = 0x0e, .mcs = RATE_MCS(RATE_MODE_OFDM, 3), }, { .flags = DEV_RATE_OFDM, .bitrate = 240, .ratemask = BIT(8), .plcp = 0x09, .mcs = RATE_MCS(RATE_MODE_OFDM, 4), }, { .flags = DEV_RATE_OFDM, .bitrate = 360, .ratemask = BIT(9), .plcp = 0x0d, .mcs = RATE_MCS(RATE_MODE_OFDM, 5), }, { .flags = DEV_RATE_OFDM, .bitrate = 480, .ratemask = BIT(10), .plcp = 0x08, .mcs = RATE_MCS(RATE_MODE_OFDM, 6), }, { .flags = DEV_RATE_OFDM, .bitrate = 540, .ratemask = BIT(11), .plcp = 0x0c, .mcs = RATE_MCS(RATE_MODE_OFDM, 7), }, }; static void rt2x00lib_channel(struct ieee80211_channel *entry, const int channel, const int tx_power, const int value) { /* XXX: this assumption about the band is wrong for 802.11j */ entry->band = channel <= 14 ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; entry->center_freq = ieee80211_channel_to_frequency(channel, entry->band); entry->hw_value = value; entry->max_power = tx_power; entry->max_antenna_gain = 0xff; } static void rt2x00lib_rate(struct ieee80211_rate *entry, const u16 index, const struct rt2x00_rate *rate) { entry->flags = 0; entry->bitrate = rate->bitrate; entry->hw_value = index; entry->hw_value_short = index; if (rate->flags & DEV_RATE_SHORT_PREAMBLE) entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; } void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) { of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); if (!is_valid_ether_addr(eeprom_mac_addr)) { eth_random_addr(eeprom_mac_addr); rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr); } } EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address); static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, struct hw_mode_spec *spec) { struct ieee80211_hw *hw = rt2x00dev->hw; struct ieee80211_channel *channels; struct ieee80211_rate *rates; unsigned int num_rates; unsigned int i; num_rates = 0; if (spec->supported_rates & SUPPORT_RATE_CCK) num_rates += 4; if (spec->supported_rates & SUPPORT_RATE_OFDM) num_rates += 8; channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL); if (!channels) return -ENOMEM; rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL); if (!rates) goto exit_free_channels; /* * Initialize Rate list. */ for (i = 0; i < num_rates; i++) rt2x00lib_rate(&rates[i], i, rt2x00_get_rate(i)); /* * Initialize Channel list. */ for (i = 0; i < spec->num_channels; i++) { rt2x00lib_channel(&channels[i], spec->channels[i].channel, spec->channels_info[i].max_power, i); } /* * Intitialize 802.11b, 802.11g * Rates: CCK, OFDM. * Channels: 2.4 GHz */ if (spec->supported_bands & SUPPORT_BAND_2GHZ) { rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14; rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates; rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels; rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates; hw->wiphy->bands[NL80211_BAND_2GHZ] = &rt2x00dev->bands[NL80211_BAND_2GHZ]; memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap, &spec->ht, sizeof(spec->ht)); } /* * Intitialize 802.11a * Rates: OFDM. * Channels: OFDM, UNII, HiperLAN2. 
*/ if (spec->supported_bands & SUPPORT_BAND_5GHZ) { rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels = spec->num_channels - 14; rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates = num_rates - 4; rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14]; rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4]; hw->wiphy->bands[NL80211_BAND_5GHZ] = &rt2x00dev->bands[NL80211_BAND_5GHZ]; memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap, &spec->ht, sizeof(spec->ht)); } return 0; exit_free_channels: kfree(channels); rt2x00_err(rt2x00dev, "Allocation ieee80211 modes failed\n"); return -ENOMEM; } static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) { if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) ieee80211_unregister_hw(rt2x00dev->hw); if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) { kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels); kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates); rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; } kfree(rt2x00dev->spec.channels_info); kfree(rt2x00dev->chan_survey); } static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = { { .throughput = 0 * 1024, .blink_time = 334 }, { .throughput = 1 * 1024, .blink_time = 260 }, { .throughput = 2 * 1024, .blink_time = 220 }, { .throughput = 5 * 1024, .blink_time = 190 }, { .throughput = 10 * 1024, .blink_time = 170 }, { .throughput = 25 * 1024, .blink_time = 150 }, { .throughput = 54 * 1024, .blink_time = 130 }, { .throughput = 120 * 1024, .blink_time = 110 }, { .throughput = 265 * 1024, .blink_time = 80 }, { .throughput = 586 * 1024, .blink_time = 50 }, }; static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; int status; if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) return 0; /* * Initialize HW modes. */ status = rt2x00lib_probe_hw_modes(rt2x00dev, spec); if (status) return status; /* * Initialize HW fields. */ rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues; /* * Initialize extra TX headroom required. */ rt2x00dev->hw->extra_tx_headroom = max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, rt2x00dev->extra_tx_headroom); /* * Take TX headroom required for alignment into account. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD)) rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE; else if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE; /* * Tell mac80211 about the size of our private STA structure. */ rt2x00dev->hw->sta_data_size = sizeof(struct rt2x00_sta); /* * Allocate tx status FIFO for driver use. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO)) { /* * Allocate the txstatus fifo. In the worst case the tx * status fifo has to hold the tx status of all entries * in all tx queues. Hence, calculate the kfifo size as * tx_queues * entry_num and round up to the nearest * power of 2. */ int kfifo_size = roundup_pow_of_two(rt2x00dev->ops->tx_queues * rt2x00dev->tx->limit * sizeof(u32)); status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size, GFP_KERNEL); if (status) return status; } /* * Initialize tasklets if used by the driver. Tasklets are * disabled until the interrupts are turned on. The driver * has to handle that. 
*/ #define RT2X00_TASKLET_INIT(taskletname) \ if (rt2x00dev->ops->lib->taskletname) { \ tasklet_setup(&rt2x00dev->taskletname, \ rt2x00dev->ops->lib->taskletname); \ } RT2X00_TASKLET_INIT(txstatus_tasklet); RT2X00_TASKLET_INIT(pretbtt_tasklet); RT2X00_TASKLET_INIT(tbtt_tasklet); RT2X00_TASKLET_INIT(rxdone_tasklet); RT2X00_TASKLET_INIT(autowake_tasklet); #undef RT2X00_TASKLET_INIT ieee80211_create_tpt_led_trigger(rt2x00dev->hw, IEEE80211_TPT_LEDTRIG_FL_RADIO, rt2x00_tpt_blink, ARRAY_SIZE(rt2x00_tpt_blink)); /* * Register HW. */ status = ieee80211_register_hw(rt2x00dev->hw); if (status) return status; set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags); return 0; } /* * Initialization/uninitialization handlers. */ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) { if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags)) return; /* * Stop rfkill polling. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_unregister(rt2x00dev); /* * Allow the HW to uninitialize. */ rt2x00dev->ops->lib->uninitialize(rt2x00dev); /* * Free allocated queue entries. */ rt2x00queue_uninitialize(rt2x00dev); } static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) { int status; if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags)) return 0; /* * Allocate all queue entries. */ status = rt2x00queue_initialize(rt2x00dev); if (status) return status; /* * Initialize the device. */ status = rt2x00dev->ops->lib->initialize(rt2x00dev); if (status) { rt2x00queue_uninitialize(rt2x00dev); return status; } set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); /* * Start rfkill polling. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_register(rt2x00dev); return 0; } int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) { int retval = 0; /* * If this is the first interface which is added, * we should load the firmware now. */ retval = rt2x00lib_load_firmware(rt2x00dev); if (retval) goto out; /* * Initialize the device. */ retval = rt2x00lib_initialize(rt2x00dev); if (retval) goto out; rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; rt2x00dev->intf_beaconing = 0; /* Enable the radio */ retval = rt2x00lib_enable_radio(rt2x00dev); if (retval) goto out; set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); out: return retval; } void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) { if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) return; /* * Perhaps we can add something smarter here, * but for now just disabling the radio should do. */ rt2x00lib_disable_radio(rt2x00dev); rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; rt2x00dev->intf_beaconing = 0; } static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) { struct ieee80211_iface_limit *if_limit; struct ieee80211_iface_combination *if_combination; if (rt2x00dev->ops->max_ap_intf < 2) return; /* * Build up AP interface limits structure. */ if_limit = &rt2x00dev->if_limits_ap; if_limit->max = rt2x00dev->ops->max_ap_intf; if_limit->types = BIT(NL80211_IFTYPE_AP); #ifdef CONFIG_MAC80211_MESH if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT); #endif /* * Build up AP interface combinations structure. */ if_combination = &rt2x00dev->if_combinations[IF_COMB_AP]; if_combination->limits = if_limit; if_combination->n_limits = 1; if_combination->max_interfaces = if_limit->max; if_combination->num_different_channels = 1; /* * Finally, specify the possible combinations to mac80211. 
*/ rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations; rt2x00dev->hw->wiphy->n_iface_combinations = 1; } static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev) { if (WARN_ON(!rt2x00dev->tx)) return 0; if (rt2x00_is_usb(rt2x00dev)) return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size; return rt2x00dev->tx[0].winfo_size; } /* * driver allocation handlers. */ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) { int retval = -ENOMEM; /* * Set possible interface combinations. */ rt2x00lib_set_if_combinations(rt2x00dev); /* * Allocate the driver data memory, if necessary. */ if (rt2x00dev->ops->drv_data_size > 0) { rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size, GFP_KERNEL); if (!rt2x00dev->drv_data) { retval = -ENOMEM; goto exit; } } spin_lock_init(&rt2x00dev->irqmask_lock); mutex_init(&rt2x00dev->csr_mutex); mutex_init(&rt2x00dev->conf_mutex); INIT_LIST_HEAD(&rt2x00dev->bar_list); spin_lock_init(&rt2x00dev->bar_list_lock); hrtimer_setup(&rt2x00dev->txstatus_timer, hrtimer_dummy_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL); set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* * Make room for rt2x00_intf inside the per-interface * structure ieee80211_vif. */ rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); /* * rt2x00 devices can only use the last n bits of the MAC address * for virtual interfaces. */ rt2x00dev->hw->wiphy->addr_mask[ETH_ALEN - 1] = (rt2x00dev->ops->max_ap_intf - 1); /* * Initialize work. */ rt2x00dev->workqueue = alloc_ordered_workqueue("%s", 0, wiphy_name(rt2x00dev->hw->wiphy)); if (!rt2x00dev->workqueue) { retval = -ENOMEM; goto exit; } INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep); /* * Let the driver probe the device to detect the capabilities. */ retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev); if (retval) { rt2x00_err(rt2x00dev, "Failed to allocate device\n"); goto exit; } /* * Allocate queue array. */ retval = rt2x00queue_allocate(rt2x00dev); if (retval) goto exit; /* Cache TX headroom value */ rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev); /* * Determine which operating modes are supported, all modes * which require beaconing, depend on the availability of * beacon entries. */ rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); if (rt2x00dev->bcn->limit > 0) rt2x00dev->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC) | #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif BIT(NL80211_IFTYPE_AP); rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(rt2x00dev->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); /* * Initialize ieee80211 structure. */ retval = rt2x00lib_probe_hw(rt2x00dev); if (retval) { rt2x00_err(rt2x00dev, "Failed to initialize hw\n"); goto exit; } /* * Register extra components. */ rt2x00link_register(rt2x00dev); rt2x00leds_register(rt2x00dev); rt2x00debug_register(rt2x00dev); /* * Start rfkill polling. */ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_register(rt2x00dev); return 0; exit: rt2x00lib_remove_dev(rt2x00dev); return retval; } EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev); void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) { clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* * Stop rfkill polling. */ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_unregister(rt2x00dev); /* * Disable radio. 
*/ rt2x00lib_disable_radio(rt2x00dev); /* * Stop all work. */ cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); cancel_work_sync(&rt2x00dev->sleep_work); hrtimer_cancel(&rt2x00dev->txstatus_timer); /* * Kill the tx status tasklet. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->pretbtt_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->autowake_tasklet); /* * Uninitialize device. */ rt2x00lib_uninitialize(rt2x00dev); if (rt2x00dev->workqueue) destroy_workqueue(rt2x00dev->workqueue); /* * Free the tx status fifo. */ kfifo_free(&rt2x00dev->txstatus_fifo); /* * Free extra components */ rt2x00debug_deregister(rt2x00dev); rt2x00leds_unregister(rt2x00dev); /* * Free ieee80211_hw memory. */ rt2x00lib_remove_hw(rt2x00dev); /* * Free firmware image. */ rt2x00lib_free_firmware(rt2x00dev); /* * Free queue structures. */ rt2x00queue_free(rt2x00dev); /* * Free the driver data. */ kfree(rt2x00dev->drv_data); } EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); /* * Device state handlers */ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev) { rt2x00_dbg(rt2x00dev, "Going to sleep\n"); /* * Prevent mac80211 from accessing driver while suspended. */ if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; /* * Cleanup as much as possible. */ rt2x00lib_uninitialize(rt2x00dev); /* * Suspend/disable extra components. */ rt2x00leds_suspend(rt2x00dev); rt2x00debug_deregister(rt2x00dev); /* * Set device mode to sleep for power management, * on some hardware this call seems to consistently fail. * From the specifications it is hard to tell why it fails, * and if this is a "bad thing". * Overall it is safe to just ignore the failure and * continue suspending. The only downside is that the * device will not be in optimal power save mode, but with * the radio and the other components already disabled the * device is as good as disabled. */ if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP)) rt2x00_warn(rt2x00dev, "Device failed to enter sleep state, continue suspending\n"); return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_suspend); int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) { rt2x00_dbg(rt2x00dev, "Waking up\n"); /* * Restore/enable extra components. */ rt2x00debug_register(rt2x00dev); rt2x00leds_resume(rt2x00dev); /* * We are ready again to receive requests from mac80211. */ set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_resume); /* * rt2x00lib module information. */ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 library"); MODULE_LICENSE("GPL"); |
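/*
 * The retry handling in rt2x00lib_fill_tx_status() above encodes the hardware
 * rate fallback (one rate index step down per retry, sticking at index 0) into
 * mac80211's tx_info->status.rates[] array. The sketch below is an illustrative
 * stand-alone model of that index/count loop only; struct example_rate,
 * example_fill_rates() and EXAMPLE_MAX_RATES are made up for this example and
 * are not part of rt2x00 or mac80211.
 */
#define EXAMPLE_MAX_RATES 4	/* stand-in for IEEE80211_TX_MAX_RATES */

struct example_rate {
	int idx;	/* rate table index used for the attempt(s) */
	int count;	/* number of attempts at this index */
};

static void example_fill_rates(struct example_rate *rates, int rate_idx, int tries)
{
	int i;

	for (i = 0; i < tries && i < EXAMPLE_MAX_RATES; i++) {
		rates[i].idx = rate_idx - i;
		if (rate_idx - i == 0) {
			/* The lowest rate absorbs all remaining attempts. */
			rates[i].count = tries - i;
			i++;
			break;
		}
		rates[i].count = 1;
	}
	if (i < EXAMPLE_MAX_RATES - 1)
		rates[i].idx = -1;	/* terminate the list, as mac80211 expects */
}
/*
 * Example: rate_idx = 1 and tries = 3 (TXDONE_FALLBACK with retry = 2) yields
 * {idx 1, count 1}, {idx 0, count 2} and a -1 terminator: three transmission
 * attempts in total, matching what the hardware reported.
 */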
// SPDX-License-Identifier: GPL-2.0 /* * fs/ext4/extents_status.c * * Written by Yongqiang Yang <xiaoqiangnk@gmail.com> * Modified by * Allison Henderson <achender@linux.vnet.ibm.com> * Hugh Dickins <hughd@google.com> * Zheng Liu <wenqing.lz@taobao.com> * * Ext4 extents status tree core functions. */ #include <linux/list_sort.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "ext4.h" #include <trace/events/ext4.h> /* * According to previous discussion at the Ext4 Developer Workshop, we * will introduce a new structure called the io tree to track all extent * status in order to solve some problems that we have met * (e.g. reservation space warning), and to provide extent-level locking. * The delay extent tree is the first step toward this goal. It was * originally built by Yongqiang Yang. At that time it was called the delay * extent tree, whose only goal was to track delayed extents in memory to * simplify the implementation of fiemap and bigalloc, and to introduce * lseek SEEK_DATA/SEEK_HOLE support. That is why it is still called the * delay extent tree in the first commit. But to better describe * what it does, it has been renamed to the extent status tree. * * Step1: * Currently the first step has been done. All delayed extents are * tracked in the tree. It maintains the delayed extent when a delayed * allocation is issued, and the delayed extent is written out or * invalidated. Therefore the implementations of fiemap and bigalloc * are simplified, and SEEK_DATA/SEEK_HOLE are introduced. * * The following comment describes the implementation of the extent * status tree and future work. * * Step2: * In this step all extent status is tracked by the extent status tree. * Thus, we can first try to look up a block mapping in this tree before * finding it in the extent tree. Hence, the single extent cache can be removed * because the extent status tree can do a better job. Extents in the status * tree are loaded on demand. Therefore, the extent status tree may not * contain all of the extents in a file. Meanwhile we define a shrinker * to reclaim memory from the extent status tree because a fragmented extent * tree will make the status tree cost too much memory. Written/unwritten/hole * extents in the tree will be reclaimed by this shrinker when we * are under high memory pressure. Delayed extents will not be * reclaimed because fiemap, bigalloc, and seek_data/hole need them. */ /* * Extent status tree implementation for ext4. * * * ========================================================================== * The extent status tree tracks all extent status. * * 1. Why do we need to implement an extent status tree? * * Without an extent status tree, ext4 identifies a delayed extent by looking * up the page cache; this has several deficiencies: complicated, buggy, * and inefficient code. * * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a * block or a range of blocks belongs to a delayed extent. * * Let us have a look at how they do without the extent status tree. * -- FIEMAP * FIEMAP looks up the page cache to identify delayed allocations from holes. * * -- SEEK_HOLE/DATA * SEEK_HOLE/DATA has the same problem as FIEMAP. * * -- bigalloc * bigalloc looks up the page cache to figure out if a block is * already under delayed allocation or not, to determine whether * quota reserving is needed for the cluster. * * -- writeout * Writeout looks up the whole page cache to see if a buffer is * mapped. If there are not very many delayed buffers, this is * time consuming.
* * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA, * bigalloc and writeout can figure out if a block or a range of * blocks is under delayed allocation (i.e. belongs to a delayed extent) or * not by searching the extent status tree. * * * ========================================================================== * 2. Ext4 extent status tree implementation * * -- extent * An extent is a range of blocks which are contiguous logically and * physically. Unlike an extent in the extent tree, this extent in ext4 is * an in-memory struct; there is no corresponding on-disk data. There * is no limit on the length of an extent, so an extent can contain as many * blocks as are contiguous logically and physically. * * -- extent status tree * Every inode has an extent status tree and all allocated blocks * are added to the tree with their status. The extents in the * tree are ordered by logical block number. * * -- operations on an extent status tree * There are three important operations on a delayed extent tree: finding the * next extent, adding an extent (a range of blocks), and removing an extent. * * -- races on an extent status tree * The extent status tree is protected by inode->i_es_lock. * * -- memory consumption * A fragmented extent tree will make the extent status tree cost too much * memory. Hence, we will reclaim written/unwritten/hole extents from * the tree under heavy memory pressure. * * ========================================================================== * 3. Assurance of Ext4 extent status tree consistency * * When mapping blocks, Ext4 queries the extent status tree first and should * always trust that the extent status tree is consistent and up to date. * Therefore, it is important to adhere to the following rules when creating, * modifying and removing extents. * * 1. Besides fastcommit replay, when Ext4 creates or queries block mappings, * the extent information should always be processed through the extent * status tree instead of being organized manually through the on-disk * extent tree. * * 2. When updating the extent tree, Ext4 should acquire the i_data_sem * exclusively and update the extent status tree atomically. If the extents * to be modified are large enough to exceed the range that a single * i_data_sem can process (as ext4_datasem_ensure_credits() may drop * i_data_sem to restart a transaction), it must (e.g. as ext4_punch_hole() * does): * * a) Hold the i_rwsem and invalidate_lock exclusively. This ensures * exclusion against page faults, as well as reads and writes that may * concurrently modify the extent status tree. * b) Evict all page cache in the affected range and recommend rebuilding * or dropping the extent status tree after modifying the on-disk * extent tree. This ensures exclusion against concurrent writebacks * that do not hold those locks but only hold a folio lock. * * 3. Based on the rules above, when querying block mappings, Ext4 should at * least hold the i_rwsem or invalidate_lock or folio lock(s) for the * specified querying range. * * ========================================================================== * 4. Performance analysis * * -- overhead * 1. There is a cached extent for write access, so if writes are * not very random, adding-space operations are in O(1) time. * * -- gain * 2. Code is much simpler, more readable, more maintainable and * more efficient. * * * ========================================================================== * 5.
TODO list * * -- Refactor delayed space reservation * * -- Extent-level locking */ static struct kmem_cache *ext4_es_cachep; static struct kmem_cache *ext4_pending_cachep; static int __es_insert_extent(struct inode *inode, struct extent_status *newes, struct extent_status *prealloc); static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end, int *reserved, struct extent_status *prealloc); static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan); static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, struct ext4_inode_info *locked_ei); static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, struct pending_reservation **prealloc); int __init ext4_init_es(void) { ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT); if (ext4_es_cachep == NULL) return -ENOMEM; return 0; } void ext4_exit_es(void) { kmem_cache_destroy(ext4_es_cachep); } void ext4_es_init_tree(struct ext4_es_tree *tree) { tree->root = RB_ROOT; tree->cache_es = NULL; } #ifdef ES_DEBUG__ static void ext4_es_print_tree(struct inode *inode) { struct ext4_es_tree *tree; struct rb_node *node; printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino); tree = &EXT4_I(inode)->i_es_tree; node = rb_first(&tree->root); while (node) { struct extent_status *es; es = rb_entry(node, struct extent_status, rb_node); printk(KERN_DEBUG " [%u/%u) %llu %x", es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); node = rb_next(node); } printk(KERN_DEBUG "\n"); } #else #define ext4_es_print_tree(inode) #endif static inline ext4_lblk_t ext4_es_end(struct extent_status *es) { BUG_ON(es->es_lblk + es->es_len < es->es_lblk); return es->es_lblk + es->es_len - 1; } /* * search through the tree for an delayed extent with a given offset. If * it can't be found, try to find next extent. */ static struct extent_status *__es_tree_search(struct rb_root *root, ext4_lblk_t lblk) { struct rb_node *node = root->rb_node; struct extent_status *es = NULL; while (node) { es = rb_entry(node, struct extent_status, rb_node); if (lblk < es->es_lblk) node = node->rb_left; else if (lblk > ext4_es_end(es)) node = node->rb_right; else return es; } if (es && lblk < es->es_lblk) return es; if (es && lblk > ext4_es_end(es)) { node = rb_next(&es->rb_node); return node ? rb_entry(node, struct extent_status, rb_node) : NULL; } return NULL; } /* * ext4_es_find_extent_range - find extent with specified status within block * range or next extent following block range in * extents status tree * * @inode - file containing the range * @matching_fn - pointer to function that matches extents with desired status * @lblk - logical block defining start of range * @end - logical block defining end of range * @es - extent found, if any * * Find the first extent within the block range specified by @lblk and @end * in the extents status tree that satisfies @matching_fn. If a match * is found, it's returned in @es. If not, and a matching extent is found * beyond the block range, it's returned in @es. If no match is found, an * extent is returned in @es whose es_lblk, es_len, and es_pblk components * are 0. 
*/ static void __es_find_extent_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk, ext4_lblk_t end, struct extent_status *es) { struct ext4_es_tree *tree = NULL; struct extent_status *es1 = NULL; struct rb_node *node; WARN_ON(es == NULL); WARN_ON(end < lblk); tree = &EXT4_I(inode)->i_es_tree; /* see if the extent has been cached */ es->es_lblk = es->es_len = es->es_pblk = 0; es1 = READ_ONCE(tree->cache_es); if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { es_debug("%u cached by [%u/%u) %llu %x\n", lblk, es1->es_lblk, es1->es_len, ext4_es_pblock(es1), ext4_es_status(es1)); goto out; } es1 = __es_tree_search(&tree->root, lblk); out: if (es1 && !matching_fn(es1)) { while ((node = rb_next(&es1->rb_node)) != NULL) { es1 = rb_entry(node, struct extent_status, rb_node); if (es1->es_lblk > end) { es1 = NULL; break; } if (matching_fn(es1)) break; } } if (es1 && matching_fn(es1)) { WRITE_ONCE(tree->cache_es, es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; } } /* * Locking for __es_find_extent_range() for external use */ void ext4_es_find_extent_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk, ext4_lblk_t end, struct extent_status *es) { es->es_lblk = es->es_len = es->es_pblk = 0; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; trace_ext4_es_find_extent_range_enter(inode, lblk); read_lock(&EXT4_I(inode)->i_es_lock); __es_find_extent_range(inode, matching_fn, lblk, end, es); read_unlock(&EXT4_I(inode)->i_es_lock); trace_ext4_es_find_extent_range_exit(inode, es); } /* * __es_scan_range - search block range for block with specified status * in extents status tree * * @inode - file containing the range * @matching_fn - pointer to function that matches extents with desired status * @lblk - logical block defining start of range * @end - logical block defining end of range * * Returns true if at least one block in the specified block range satisfies * the criterion specified by @matching_fn, and false if not. If at least * one extent has the specified status, then there is at least one block * in the cluster with that status. Should only be called by code that has * taken i_es_lock. */ static bool __es_scan_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t start, ext4_lblk_t end) { struct extent_status es; __es_find_extent_range(inode, matching_fn, start, end, &es); if (es.es_len == 0) return false; /* no matching extent in the tree */ else if (es.es_lblk <= start && start < es.es_lblk + es.es_len) return true; else if (start <= es.es_lblk && es.es_lblk <= end) return true; else return false; } /* * Locking for __es_scan_range() for external use */ bool ext4_es_scan_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk, ext4_lblk_t end) { bool ret; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return false; read_lock(&EXT4_I(inode)->i_es_lock); ret = __es_scan_range(inode, matching_fn, lblk, end); read_unlock(&EXT4_I(inode)->i_es_lock); return ret; } /* * __es_scan_clu - search cluster for block with specified status in * extents status tree * * @inode - file containing the cluster * @matching_fn - pointer to function that matches extents with desired status * @lblk - logical block in cluster to be searched * * Returns true if at least one extent in the cluster containing @lblk * satisfies the criterion specified by @matching_fn, and false if not. 
If at * least one extent has the specified status, then there is at least one block * in the cluster with that status. Should only be called by code that has * taken i_es_lock. */ static bool __es_scan_clu(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t lblk_start, lblk_end; lblk_start = EXT4_LBLK_CMASK(sbi, lblk); lblk_end = lblk_start + sbi->s_cluster_ratio - 1; return __es_scan_range(inode, matching_fn, lblk_start, lblk_end); } /* * Locking for __es_scan_clu() for external use */ bool ext4_es_scan_clu(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk) { bool ret; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return false; read_lock(&EXT4_I(inode)->i_es_lock); ret = __es_scan_clu(inode, matching_fn, lblk); read_unlock(&EXT4_I(inode)->i_es_lock); return ret; } static void ext4_es_list_add(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (!list_empty(&ei->i_es_list)) return; spin_lock(&sbi->s_es_lock); if (list_empty(&ei->i_es_list)) { list_add_tail(&ei->i_es_list, &sbi->s_es_list); sbi->s_es_nr_inode++; } spin_unlock(&sbi->s_es_lock); } static void ext4_es_list_del(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); spin_lock(&sbi->s_es_lock); if (!list_empty(&ei->i_es_list)) { list_del_init(&ei->i_es_list); sbi->s_es_nr_inode--; WARN_ON_ONCE(sbi->s_es_nr_inode < 0); } spin_unlock(&sbi->s_es_lock); } static inline struct pending_reservation *__alloc_pending(bool nofail) { if (!nofail) return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC); return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL); } static inline void __free_pending(struct pending_reservation *pr) { kmem_cache_free(ext4_pending_cachep, pr); } /* * Returns true if we cannot fail to allocate memory for this extent_status * entry and cannot reclaim it until its status changes. */ static inline bool ext4_es_must_keep(struct extent_status *es) { /* fiemap, bigalloc, and seek_data/hole need to use it. */ if (ext4_es_is_delayed(es)) return true; return false; } static inline struct extent_status *__es_alloc_extent(bool nofail) { if (!nofail) return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC); return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL); } static void ext4_es_init_extent(struct inode *inode, struct extent_status *es, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk) { es->es_lblk = lblk; es->es_len = len; es->es_pblk = pblk; /* We never try to reclaim a must kept extent, so we don't count it. */ if (!ext4_es_must_keep(es)) { if (!EXT4_I(inode)->i_es_shk_nr++) ext4_es_list_add(inode); percpu_counter_inc(&EXT4_SB(inode->i_sb)-> s_es_stats.es_stats_shk_cnt); } EXT4_I(inode)->i_es_all_nr++; percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); } static inline void __es_free_extent(struct extent_status *es) { kmem_cache_free(ext4_es_cachep, es); } static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) { EXT4_I(inode)->i_es_all_nr--; percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); /* Decrease the shrink counter when we can reclaim the extent. 
*/ if (!ext4_es_must_keep(es)) { BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0); if (!--EXT4_I(inode)->i_es_shk_nr) ext4_es_list_del(inode); percpu_counter_dec(&EXT4_SB(inode->i_sb)-> s_es_stats.es_stats_shk_cnt); } __es_free_extent(es); } /* * Check whether or not two extents can be merged * Condition: * - logical block number is contiguous * - physical block number is contiguous * - status is equal */ static int ext4_es_can_be_merged(struct extent_status *es1, struct extent_status *es2) { if (ext4_es_type(es1) != ext4_es_type(es2)) return 0; if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) { pr_warn("ES assertion failed when merging extents. " "The sum of lengths of es1 (%d) and es2 (%d) " "is bigger than allowed file size (%d)\n", es1->es_len, es2->es_len, EXT_MAX_BLOCKS); WARN_ON(1); return 0; } if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) return 0; if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) return 1; if (ext4_es_is_hole(es1)) return 1; /* we need to check delayed extent */ if (ext4_es_is_delayed(es1)) return 1; return 0; } static struct extent_status * ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct extent_status *es1; struct rb_node *node; node = rb_prev(&es->rb_node); if (!node) return es; es1 = rb_entry(node, struct extent_status, rb_node); if (ext4_es_can_be_merged(es1, es)) { es1->es_len += es->es_len; if (ext4_es_is_referenced(es)) ext4_es_set_referenced(es1); rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); es = es1; } return es; } static struct extent_status * ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct extent_status *es1; struct rb_node *node; node = rb_next(&es->rb_node); if (!node) return es; es1 = rb_entry(node, struct extent_status, rb_node); if (ext4_es_can_be_merged(es, es1)) { es->es_len += es1->es_len; if (ext4_es_is_referenced(es1)) ext4_es_set_referenced(es); rb_erase(node, &tree->root); ext4_es_free_extent(inode, es1); } return es; } #ifdef ES_AGGRESSIVE_TEST #include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */ static void ext4_es_insert_extent_ext_check(struct inode *inode, struct extent_status *es) { struct ext4_ext_path *path = NULL; struct ext4_extent *ex; ext4_lblk_t ee_block; ext4_fsblk_t ee_start; unsigned short ee_len; int depth, ee_status, es_status; path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) return; depth = ext_depth(inode); ex = path[depth].p_ext; if (ex) { ee_block = le32_to_cpu(ex->ee_block); ee_start = ext4_ext_pblock(ex); ee_len = ext4_ext_get_actual_len(ex); ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0; es_status = ext4_es_is_unwritten(es) ? 1 : 0; /* * Make sure ex and es are not overlap when we try to insert * a delayed/hole extent. */ if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { if (in_range(es->es_lblk, ee_block, ee_len)) { pr_warn("ES insert assertion failed for " "inode: %lu we can find an extent " "at block [%d/%d/%llu/%c], but we " "want to add a delayed/hole extent " "[%d/%d/%llu/%x]\n", inode->i_ino, ee_block, ee_len, ee_start, ee_status ? 'u' : 'w', es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); } goto out; } /* * We don't check ee_block == es->es_lblk, etc. because es * might be a part of whole extent, vice versa. 
*/ if (es->es_lblk < ee_block || ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { pr_warn("ES insert assertion failed for inode: %lu " "ex_status [%d/%d/%llu/%c] != " "es_status [%d/%d/%llu/%c]\n", inode->i_ino, ee_block, ee_len, ee_start, ee_status ? 'u' : 'w', es->es_lblk, es->es_len, ext4_es_pblock(es), es_status ? 'u' : 'w'); goto out; } if (ee_status ^ es_status) { pr_warn("ES insert assertion failed for inode: %lu " "ex_status [%d/%d/%llu/%c] != " "es_status [%d/%d/%llu/%c]\n", inode->i_ino, ee_block, ee_len, ee_start, ee_status ? 'u' : 'w', es->es_lblk, es->es_len, ext4_es_pblock(es), es_status ? 'u' : 'w'); } } else { /* * We can't find an extent on disk. So we need to make sure * that we don't want to add an written/unwritten extent. */ if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { pr_warn("ES insert assertion failed for inode: %lu " "can't find an extent at block %d but we want " "to add a written/unwritten extent " "[%d/%d/%llu/%x]\n", inode->i_ino, es->es_lblk, es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); } } out: ext4_free_ext_path(path); } static void ext4_es_insert_extent_ind_check(struct inode *inode, struct extent_status *es) { struct ext4_map_blocks map; int retval; /* * Here we call ext4_ind_map_blocks to lookup a block mapping because * 'Indirect' structure is defined in indirect.c. So we couldn't * access direct/indirect tree from outside. It is too dirty to define * this function in indirect.c file. */ map.m_lblk = es->es_lblk; map.m_len = es->es_len; retval = ext4_ind_map_blocks(NULL, inode, &map, 0); if (retval > 0) { if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { /* * We want to add a delayed/hole extent but this * block has been allocated. */ pr_warn("ES insert assertion failed for inode: %lu " "We can find blocks but we want to add a " "delayed/hole extent [%d/%d/%llu/%x]\n", inode->i_ino, es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); return; } else if (ext4_es_is_written(es)) { if (retval != es->es_len) { pr_warn("ES insert assertion failed for " "inode: %lu retval %d != es_len %d\n", inode->i_ino, retval, es->es_len); return; } if (map.m_pblk != ext4_es_pblock(es)) { pr_warn("ES insert assertion failed for " "inode: %lu m_pblk %llu != " "es_pblk %llu\n", inode->i_ino, map.m_pblk, ext4_es_pblock(es)); return; } } else { /* * We don't need to check unwritten extent because * indirect-based file doesn't have it. */ BUG(); } } else if (retval == 0) { if (ext4_es_is_written(es)) { pr_warn("ES insert assertion failed for inode: %lu " "We can't find the block but we want to add " "a written extent [%d/%d/%llu/%x]\n", inode->i_ino, es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); return; } } } static inline void ext4_es_insert_extent_check(struct inode *inode, struct extent_status *es) { /* * We don't need to worry about the race condition because * caller takes i_data_sem locking. 
*/ BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ext4_es_insert_extent_ext_check(inode, es); else ext4_es_insert_extent_ind_check(inode, es); } #else static inline void ext4_es_insert_extent_check(struct inode *inode, struct extent_status *es) { } #endif static int __es_insert_extent(struct inode *inode, struct extent_status *newes, struct extent_status *prealloc) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct rb_node **p = &tree->root.rb_node; struct rb_node *parent = NULL; struct extent_status *es; while (*p) { parent = *p; es = rb_entry(parent, struct extent_status, rb_node); if (newes->es_lblk < es->es_lblk) { if (ext4_es_can_be_merged(newes, es)) { /* * Here we can modify es_lblk directly * because it isn't overlapped. */ es->es_lblk = newes->es_lblk; es->es_len += newes->es_len; if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) ext4_es_store_pblock(es, newes->es_pblk); es = ext4_es_try_to_merge_left(inode, es); goto out; } p = &(*p)->rb_left; } else if (newes->es_lblk > ext4_es_end(es)) { if (ext4_es_can_be_merged(es, newes)) { es->es_len += newes->es_len; es = ext4_es_try_to_merge_right(inode, es); goto out; } p = &(*p)->rb_right; } else { BUG(); return -EINVAL; } } if (prealloc) es = prealloc; else es = __es_alloc_extent(false); if (!es) return -ENOMEM; ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len, newes->es_pblk); rb_link_node(&es->rb_node, parent, p); rb_insert_color(&es->rb_node, &tree->root); out: tree->cache_es = es; return 0; } /* * ext4_es_insert_extent() adds information to an inode's extent * status tree. */ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, unsigned int status, bool delalloc_reserve_used) { struct extent_status newes; ext4_lblk_t end = lblk + len - 1; int err1 = 0, err2 = 0, err3 = 0; int resv_used = 0, pending = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct extent_status *es1 = NULL; struct extent_status *es2 = NULL; struct pending_reservation *pr = NULL; bool revise_pending = false; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; es_debug("add [%u/%u) %llu %x %d to extent status tree of inode %lu\n", lblk, len, pblk, status, delalloc_reserve_used, inode->i_ino); if (!len) return; BUG_ON(end < lblk); WARN_ON_ONCE(status & EXTENT_STATUS_DELAYED); newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); trace_ext4_es_insert_extent(inode, &newes); ext4_es_insert_extent_check(inode, &newes); revise_pending = sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) && (status & (EXTENT_STATUS_WRITTEN | EXTENT_STATUS_UNWRITTEN)); retry: if (err1 && !es1) es1 = __es_alloc_extent(true); if ((err1 || err2) && !es2) es2 = __es_alloc_extent(true); if ((err1 || err2 || err3 < 0) && revise_pending && !pr) pr = __alloc_pending(true); write_lock(&EXT4_I(inode)->i_es_lock); err1 = __es_remove_extent(inode, lblk, end, &resv_used, es1); if (err1 != 0) goto error; /* Free preallocated extent if it didn't get used. */ if (es1) { if (!es1->es_len) __es_free_extent(es1); es1 = NULL; } err2 = __es_insert_extent(inode, &newes, es2); if (err2 == -ENOMEM && !ext4_es_must_keep(&newes)) err2 = 0; if (err2 != 0) goto error; /* Free preallocated extent if it didn't get used. 
*/ if (es2) { if (!es2->es_len) __es_free_extent(es2); es2 = NULL; } if (revise_pending) { err3 = __revise_pending(inode, lblk, len, &pr); if (err3 < 0) goto error; if (pr) { __free_pending(pr); pr = NULL; } pending = err3; } error: write_unlock(&EXT4_I(inode)->i_es_lock); /* * Reduce the reserved cluster count to reflect successful deferred * allocation of delayed allocated clusters or direct allocation of * clusters discovered to be delayed allocated. Once allocated, a * cluster is not included in the reserved count. * * When direct allocating (from fallocate, filemap, DIO, or clusters * allocated when delalloc has been disabled by ext4_nonda_switch()) * an extent either 1) contains delayed blocks but start with * non-delayed allocated blocks (e.g. hole) or 2) contains non-delayed * allocated blocks which belong to delayed allocated clusters when * bigalloc feature is enabled, quota has already been claimed by * ext4_mb_new_blocks(), so release the quota reservations made for * any previously delayed allocated clusters instead of claim them * again. */ resv_used += pending; if (resv_used) ext4_da_update_reserve_space(inode, resv_used, delalloc_reserve_used); if (err1 || err2 || err3 < 0) goto retry; ext4_es_print_tree(inode); return; } /* * ext4_es_cache_extent() inserts information into the extent status * tree if and only if there isn't information about the range in * question already. */ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, unsigned int status) { struct extent_status *es; struct extent_status newes; ext4_lblk_t end = lblk + len - 1; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); trace_ext4_es_cache_extent(inode, &newes); if (!len) return; BUG_ON(end < lblk); write_lock(&EXT4_I(inode)->i_es_lock); es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk); if (!es || es->es_lblk > end) __es_insert_extent(inode, &newes, NULL); write_unlock(&EXT4_I(inode)->i_es_lock); } /* * ext4_es_lookup_extent() looks up an extent in extent status tree. * * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks. 
* * Return: 1 on found, 0 on not */ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t *next_lblk, struct extent_status *es) { struct ext4_es_tree *tree; struct ext4_es_stats *stats; struct extent_status *es1 = NULL; struct rb_node *node; int found = 0; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return 0; trace_ext4_es_lookup_extent_enter(inode, lblk); es_debug("lookup extent in block %u\n", lblk); tree = &EXT4_I(inode)->i_es_tree; read_lock(&EXT4_I(inode)->i_es_lock); /* find extent in cache firstly */ es->es_lblk = es->es_len = es->es_pblk = 0; es1 = READ_ONCE(tree->cache_es); if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { es_debug("%u cached by [%u/%u)\n", lblk, es1->es_lblk, es1->es_len); found = 1; goto out; } node = tree->root.rb_node; while (node) { es1 = rb_entry(node, struct extent_status, rb_node); if (lblk < es1->es_lblk) node = node->rb_left; else if (lblk > ext4_es_end(es1)) node = node->rb_right; else { found = 1; break; } } out: stats = &EXT4_SB(inode->i_sb)->s_es_stats; if (found) { BUG_ON(!es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; if (!ext4_es_is_referenced(es1)) ext4_es_set_referenced(es1); percpu_counter_inc(&stats->es_stats_cache_hits); if (next_lblk) { node = rb_next(&es1->rb_node); if (node) { es1 = rb_entry(node, struct extent_status, rb_node); *next_lblk = es1->es_lblk; } else *next_lblk = 0; } } else { percpu_counter_inc(&stats->es_stats_cache_misses); } read_unlock(&EXT4_I(inode)->i_es_lock); trace_ext4_es_lookup_extent_exit(inode, es, found); return found; } struct rsvd_count { int ndelayed; bool first_do_lblk_found; ext4_lblk_t first_do_lblk; ext4_lblk_t last_do_lblk; struct extent_status *left_es; bool partial; ext4_lblk_t lclu; }; /* * init_rsvd - initialize reserved count data before removing block range * in file from extent status tree * * @inode - file containing range * @lblk - first block in range * @es - pointer to first extent in range * @rc - pointer to reserved count data * * Assumes es is not NULL */ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es, struct rsvd_count *rc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct rb_node *node; rc->ndelayed = 0; /* * for bigalloc, note the first delayed block in the range has not * been found, record the extent containing the block to the left of * the region to be removed, if any, and note that there's no partial * cluster to track */ if (sbi->s_cluster_ratio > 1) { rc->first_do_lblk_found = false; if (lblk > es->es_lblk) { rc->left_es = es; } else { node = rb_prev(&es->rb_node); rc->left_es = node ? 
rb_entry(node, struct extent_status, rb_node) : NULL; } rc->partial = false; } } /* * count_rsvd - count the clusters containing delayed blocks in a range * within an extent and add to the running tally in rsvd_count * * @inode - file containing extent * @lblk - first block in range * @len - length of range in blocks * @es - pointer to extent containing clusters to be counted * @rc - pointer to reserved count data * * Tracks partial clusters found at the beginning and end of extents so * they aren't overcounted when they span adjacent extents */ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len, struct extent_status *es, struct rsvd_count *rc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t i, end, nclu; if (!ext4_es_is_delayed(es)) return; WARN_ON(len <= 0); if (sbi->s_cluster_ratio == 1) { rc->ndelayed += (int) len; return; } /* bigalloc */ i = (lblk < es->es_lblk) ? es->es_lblk : lblk; end = lblk + (ext4_lblk_t) len - 1; end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end; /* record the first block of the first delayed extent seen */ if (!rc->first_do_lblk_found) { rc->first_do_lblk = i; rc->first_do_lblk_found = true; } /* update the last lblk in the region seen so far */ rc->last_do_lblk = end; /* * if we're tracking a partial cluster and the current extent * doesn't start with it, count it and stop tracking */ if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) { rc->ndelayed++; rc->partial = false; } /* * if the first cluster doesn't start on a cluster boundary but * ends on one, count it */ if (EXT4_LBLK_COFF(sbi, i) != 0) { if (end >= EXT4_LBLK_CFILL(sbi, i)) { rc->ndelayed++; rc->partial = false; i = EXT4_LBLK_CFILL(sbi, i) + 1; } } /* * if the current cluster starts on a cluster boundary, count the * number of whole delayed clusters in the extent */ if ((i + sbi->s_cluster_ratio - 1) <= end) { nclu = (end - i + 1) >> sbi->s_cluster_bits; rc->ndelayed += nclu; i += nclu << sbi->s_cluster_bits; } /* * start tracking a partial cluster if there's a partial at the end * of the current extent and we're not already tracking one */ if (!rc->partial && i <= end) { rc->partial = true; rc->lclu = EXT4_B2C(sbi, i); } } /* * __pr_tree_search - search for a pending cluster reservation * * @root - root of pending reservation tree * @lclu - logical cluster to search for * * Returns the pending reservation for the cluster identified by @lclu * if found. If not, returns a reservation for the next cluster if any, * and if not, returns NULL. */ static struct pending_reservation *__pr_tree_search(struct rb_root *root, ext4_lblk_t lclu) { struct rb_node *node = root->rb_node; struct pending_reservation *pr = NULL; while (node) { pr = rb_entry(node, struct pending_reservation, rb_node); if (lclu < pr->lclu) node = node->rb_left; else if (lclu > pr->lclu) node = node->rb_right; else return pr; } if (pr && lclu < pr->lclu) return pr; if (pr && lclu > pr->lclu) { node = rb_next(&pr->rb_node); return node ? 
rb_entry(node, struct pending_reservation, rb_node) : NULL; } return NULL; } /* * get_rsvd - calculates and returns the number of cluster reservations to be * released when removing a block range from the extent status tree * and releases any pending reservations within the range * * @inode - file containing block range * @end - last block in range * @right_es - pointer to extent containing next block beyond end or NULL * @rc - pointer to reserved count data * * The number of reservations to be released is equal to the number of * clusters containing delayed blocks within the range, minus the number of * clusters still containing delayed blocks at the ends of the range, and * minus the number of pending reservations within the range. */ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end, struct extent_status *right_es, struct rsvd_count *rc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct pending_reservation *pr; struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; struct rb_node *node; ext4_lblk_t first_lclu, last_lclu; bool left_delayed, right_delayed, count_pending; struct extent_status *es; if (sbi->s_cluster_ratio > 1) { /* count any remaining partial cluster */ if (rc->partial) rc->ndelayed++; if (rc->ndelayed == 0) return 0; first_lclu = EXT4_B2C(sbi, rc->first_do_lblk); last_lclu = EXT4_B2C(sbi, rc->last_do_lblk); /* * decrease the delayed count by the number of clusters at the * ends of the range that still contain delayed blocks - * these clusters still need to be reserved */ left_delayed = right_delayed = false; es = rc->left_es; while (es && ext4_es_end(es) >= EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) { if (ext4_es_is_delayed(es)) { rc->ndelayed--; left_delayed = true; break; } node = rb_prev(&es->rb_node); if (!node) break; es = rb_entry(node, struct extent_status, rb_node); } if (right_es && (!left_delayed || first_lclu != last_lclu)) { if (end < ext4_es_end(right_es)) { es = right_es; } else { node = rb_next(&right_es->rb_node); es = node ? rb_entry(node, struct extent_status, rb_node) : NULL; } while (es && es->es_lblk <= EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) { if (ext4_es_is_delayed(es)) { rc->ndelayed--; right_delayed = true; break; } node = rb_next(&es->rb_node); if (!node) break; es = rb_entry(node, struct extent_status, rb_node); } } /* * Determine the block range that should be searched for * pending reservations, if any. Clusters on the ends of the * original removed range containing delayed blocks are * excluded. They've already been accounted for and it's not * possible to determine if an associated pending reservation * should be released with the information available in the * extents status tree. 
*/ if (first_lclu == last_lclu) { if (left_delayed | right_delayed) count_pending = false; else count_pending = true; } else { if (left_delayed) first_lclu++; if (right_delayed) last_lclu--; if (first_lclu <= last_lclu) count_pending = true; else count_pending = false; } /* * a pending reservation found between first_lclu and last_lclu * represents an allocated cluster that contained at least one * delayed block, so the delayed total must be reduced by one * for each pending reservation found and released */ if (count_pending) { pr = __pr_tree_search(&tree->root, first_lclu); while (pr && pr->lclu <= last_lclu) { rc->ndelayed--; node = rb_next(&pr->rb_node); rb_erase(&pr->rb_node, &tree->root); __free_pending(pr); if (!node) break; pr = rb_entry(node, struct pending_reservation, rb_node); } } } return rc->ndelayed; } /* * __es_remove_extent - removes block range from extent status tree * * @inode - file containing range * @lblk - first block in range * @end - last block in range * @reserved - number of cluster reservations released * @prealloc - pre-allocated es to avoid memory allocation failures * * If @reserved is not NULL and delayed allocation is enabled, counts * block/cluster reservations freed by removing range and if bigalloc * enabled cancels pending reservations as needed. Returns 0 on success, * error code on failure. */ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end, int *reserved, struct extent_status *prealloc) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct rb_node *node; struct extent_status *es; struct extent_status orig_es; ext4_lblk_t len1, len2; ext4_fsblk_t block; int err = 0; bool count_reserved = true; struct rsvd_count rc; if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC)) count_reserved = false; es = __es_tree_search(&tree->root, lblk); if (!es) goto out; if (es->es_lblk > end) goto out; /* Simply invalidate cache_es. */ tree->cache_es = NULL; if (count_reserved) init_rsvd(inode, lblk, es, &rc); orig_es.es_lblk = es->es_lblk; orig_es.es_len = es->es_len; orig_es.es_pblk = es->es_pblk; len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0; len2 = ext4_es_end(es) > end ? 
ext4_es_end(es) - end : 0; if (len1 > 0) es->es_len = len1; if (len2 > 0) { if (len1 > 0) { struct extent_status newes; newes.es_lblk = end + 1; newes.es_len = len2; block = 0x7FDEADBEEFULL; if (ext4_es_is_written(&orig_es) || ext4_es_is_unwritten(&orig_es)) block = ext4_es_pblock(&orig_es) + orig_es.es_len - len2; ext4_es_store_pblock_status(&newes, block, ext4_es_status(&orig_es)); err = __es_insert_extent(inode, &newes, prealloc); if (err) { if (!ext4_es_must_keep(&newes)) return 0; es->es_lblk = orig_es.es_lblk; es->es_len = orig_es.es_len; goto out; } } else { es->es_lblk = end + 1; es->es_len = len2; if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) { block = orig_es.es_pblk + orig_es.es_len - len2; ext4_es_store_pblock(es, block); } } if (count_reserved) count_rsvd(inode, orig_es.es_lblk + len1, orig_es.es_len - len1 - len2, &orig_es, &rc); goto out_get_reserved; } if (len1 > 0) { if (count_reserved) count_rsvd(inode, lblk, orig_es.es_len - len1, &orig_es, &rc); node = rb_next(&es->rb_node); if (node) es = rb_entry(node, struct extent_status, rb_node); else es = NULL; } while (es && ext4_es_end(es) <= end) { if (count_reserved) count_rsvd(inode, es->es_lblk, es->es_len, es, &rc); node = rb_next(&es->rb_node); rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); if (!node) { es = NULL; break; } es = rb_entry(node, struct extent_status, rb_node); } if (es && es->es_lblk < end + 1) { ext4_lblk_t orig_len = es->es_len; len1 = ext4_es_end(es) - end; if (count_reserved) count_rsvd(inode, es->es_lblk, orig_len - len1, es, &rc); es->es_lblk = end + 1; es->es_len = len1; if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) { block = es->es_pblk + orig_len - len1; ext4_es_store_pblock(es, block); } } out_get_reserved: if (count_reserved) *reserved = get_rsvd(inode, end, es, &rc); out: return err; } /* * ext4_es_remove_extent - removes block range from extent status tree * * @inode - file containing range * @lblk - first block in range * @len - number of blocks to remove * * Reduces block/cluster reservation count and for bigalloc cancels pending * reservations as needed. */ void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len) { ext4_lblk_t end; int err = 0; int reserved = 0; struct extent_status *es = NULL; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; trace_ext4_es_remove_extent(inode, lblk, len); es_debug("remove [%u/%u) from extent status tree of inode %lu\n", lblk, len, inode->i_ino); if (!len) return; end = lblk + len - 1; BUG_ON(end < lblk); retry: if (err && !es) es = __es_alloc_extent(true); /* * ext4_clear_inode() depends on us taking i_es_lock unconditionally * so that we are sure __es_shrink() is done with the inode before it * is reclaimed. */ write_lock(&EXT4_I(inode)->i_es_lock); err = __es_remove_extent(inode, lblk, end, &reserved, es); /* Free preallocated extent if it didn't get used. 
*/ if (es) { if (!es->es_len) __es_free_extent(es); es = NULL; } write_unlock(&EXT4_I(inode)->i_es_lock); if (err) goto retry; ext4_es_print_tree(inode); ext4_da_release_space(inode, reserved); } static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, struct ext4_inode_info *locked_ei) { struct ext4_inode_info *ei; struct ext4_es_stats *es_stats; ktime_t start_time; u64 scan_time; int nr_to_walk; int nr_shrunk = 0; int retried = 0, nr_skipped = 0; es_stats = &sbi->s_es_stats; start_time = ktime_get(); retry: spin_lock(&sbi->s_es_lock); nr_to_walk = sbi->s_es_nr_inode; while (nr_to_walk-- > 0) { if (list_empty(&sbi->s_es_list)) { spin_unlock(&sbi->s_es_lock); goto out; } ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info, i_es_list); /* Move the inode to the tail */ list_move_tail(&ei->i_es_list, &sbi->s_es_list); /* * Normally we try hard to avoid shrinking precached inodes, * but we will as a last resort. */ if (!retried && ext4_test_inode_state(&ei->vfs_inode, EXT4_STATE_EXT_PRECACHED)) { nr_skipped++; continue; } if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) { nr_skipped++; continue; } /* * Now we hold i_es_lock which protects us from inode reclaim * freeing inode under us */ spin_unlock(&sbi->s_es_lock); nr_shrunk += es_reclaim_extents(ei, &nr_to_scan); write_unlock(&ei->i_es_lock); if (nr_to_scan <= 0) goto out; spin_lock(&sbi->s_es_lock); } spin_unlock(&sbi->s_es_lock); /* * If we skipped any inodes, and we weren't able to make any * forward progress, try again to scan precached inodes. */ if ((nr_shrunk == 0) && nr_skipped && !retried) { retried++; goto retry; } if (locked_ei && nr_shrunk == 0) nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan); out: scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); if (likely(es_stats->es_stats_scan_time)) es_stats->es_stats_scan_time = (scan_time + es_stats->es_stats_scan_time*3) / 4; else es_stats->es_stats_scan_time = scan_time; if (scan_time > es_stats->es_stats_max_scan_time) es_stats->es_stats_max_scan_time = scan_time; if (likely(es_stats->es_stats_shrunk)) es_stats->es_stats_shrunk = (nr_shrunk + es_stats->es_stats_shrunk*3) / 4; else es_stats->es_stats_shrunk = nr_shrunk; trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, nr_skipped, retried); return nr_shrunk; } static unsigned long ext4_es_count(struct shrinker *shrink, struct shrink_control *sc) { unsigned long nr; struct ext4_sb_info *sbi; sbi = shrink->private_data; nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr); return nr; } static unsigned long ext4_es_scan(struct shrinker *shrink, struct shrink_control *sc) { struct ext4_sb_info *sbi = shrink->private_data; int nr_to_scan = sc->nr_to_scan; int ret, nr_shrunk; ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); return nr_shrunk; } int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v) { struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private); struct ext4_es_stats *es_stats = &sbi->s_es_stats; struct ext4_inode_info *ei, *max = NULL; unsigned int inode_cnt = 0; if (v != SEQ_START_TOKEN) return 0; /* here we just find an inode that has the max nr. 
of objects */ spin_lock(&sbi->s_es_lock); list_for_each_entry(ei, &sbi->s_es_list, i_es_list) { inode_cnt++; if (max && max->i_es_all_nr < ei->i_es_all_nr) max = ei; else if (!max) max = ei; } spin_unlock(&sbi->s_es_lock); seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n", percpu_counter_sum_positive(&es_stats->es_stats_all_cnt), percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt)); seq_printf(seq, " %lld/%lld cache hits/misses\n", percpu_counter_sum_positive(&es_stats->es_stats_cache_hits), percpu_counter_sum_positive(&es_stats->es_stats_cache_misses)); if (inode_cnt) seq_printf(seq, " %d inodes on list\n", inode_cnt); seq_printf(seq, "average:\n %llu us scan time\n", div_u64(es_stats->es_stats_scan_time, 1000)); seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk); if (inode_cnt) seq_printf(seq, "maximum:\n %lu inode (%u objects, %u reclaimable)\n" " %llu us max scan time\n", max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr, div_u64(es_stats->es_stats_max_scan_time, 1000)); return 0; } int ext4_es_register_shrinker(struct ext4_sb_info *sbi) { int err; /* Make sure we have enough bits for physical block number */ BUILD_BUG_ON(ES_SHIFT < 48); INIT_LIST_HEAD(&sbi->s_es_list); sbi->s_es_nr_inode = 0; spin_lock_init(&sbi->s_es_lock); sbi->s_es_stats.es_stats_shrunk = 0; err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0, GFP_KERNEL); if (err) return err; err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0, GFP_KERNEL); if (err) goto err1; sbi->s_es_stats.es_stats_scan_time = 0; sbi->s_es_stats.es_stats_max_scan_time = 0; err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); if (err) goto err2; err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL); if (err) goto err3; sbi->s_es_shrinker = shrinker_alloc(0, "ext4-es:%s", sbi->s_sb->s_id); if (!sbi->s_es_shrinker) { err = -ENOMEM; goto err4; } sbi->s_es_shrinker->scan_objects = ext4_es_scan; sbi->s_es_shrinker->count_objects = ext4_es_count; sbi->s_es_shrinker->private_data = sbi; shrinker_register(sbi->s_es_shrinker); return 0; err4: percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); err3: percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); err2: percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses); err1: percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits); return err; } void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) { percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits); percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses); percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); shrinker_free(sbi->s_es_shrinker); } /* * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at * most *nr_to_scan extents, update *nr_to_scan accordingly. * * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan. * Increment *nr_shrunk by the number of reclaimed extents. Also update * ei->i_es_shrink_lblk to where we should continue scanning. 
*/ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end, int *nr_to_scan, int *nr_shrunk) { struct inode *inode = &ei->vfs_inode; struct ext4_es_tree *tree = &ei->i_es_tree; struct extent_status *es; struct rb_node *node; es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk); if (!es) goto out_wrap; while (*nr_to_scan > 0) { if (es->es_lblk > end) { ei->i_es_shrink_lblk = end + 1; return 0; } (*nr_to_scan)--; node = rb_next(&es->rb_node); if (ext4_es_must_keep(es)) goto next; if (ext4_es_is_referenced(es)) { ext4_es_clear_referenced(es); goto next; } rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); (*nr_shrunk)++; next: if (!node) goto out_wrap; es = rb_entry(node, struct extent_status, rb_node); } ei->i_es_shrink_lblk = es->es_lblk; return 1; out_wrap: ei->i_es_shrink_lblk = 0; return 0; } static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan) { struct inode *inode = &ei->vfs_inode; int nr_shrunk = 0; ext4_lblk_t start = ei->i_es_shrink_lblk; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); if (ei->i_es_shk_nr == 0) return 0; if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && __ratelimit(&_rs)) ext4_warning(inode->i_sb, "forced shrink of precached extents"); if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) && start != 0) es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk); ei->i_es_tree.cache_es = NULL; return nr_shrunk; } /* * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove * discretionary entries from the extent status cache. (Some entries * must be present for proper operations.) */ void ext4_clear_inode_es(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct extent_status *es; struct ext4_es_tree *tree; struct rb_node *node; write_lock(&ei->i_es_lock); tree = &EXT4_I(inode)->i_es_tree; tree->cache_es = NULL; node = rb_first(&tree->root); while (node) { es = rb_entry(node, struct extent_status, rb_node); node = rb_next(node); if (!ext4_es_must_keep(es)) { rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); } } ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED); write_unlock(&ei->i_es_lock); } #ifdef ES_DEBUG__ static void ext4_print_pending_tree(struct inode *inode) { struct ext4_pending_tree *tree; struct rb_node *node; struct pending_reservation *pr; printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino); tree = &EXT4_I(inode)->i_pending_tree; node = rb_first(&tree->root); while (node) { pr = rb_entry(node, struct pending_reservation, rb_node); printk(KERN_DEBUG " %u", pr->lclu); node = rb_next(node); } printk(KERN_DEBUG "\n"); } #else #define ext4_print_pending_tree(inode) #endif int __init ext4_init_pending(void) { ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT); if (ext4_pending_cachep == NULL) return -ENOMEM; return 0; } void ext4_exit_pending(void) { kmem_cache_destroy(ext4_pending_cachep); } void ext4_init_pending_tree(struct ext4_pending_tree *tree) { tree->root = RB_ROOT; } /* * __get_pending - retrieve a pointer to a pending reservation * * @inode - file containing the pending cluster reservation * @lclu - logical cluster of interest * * Returns a pointer to a pending reservation if it's a member of * the set, and NULL if not. Must be called holding i_es_lock. 
*/ static struct pending_reservation *__get_pending(struct inode *inode, ext4_lblk_t lclu) { struct ext4_pending_tree *tree; struct rb_node *node; struct pending_reservation *pr = NULL; tree = &EXT4_I(inode)->i_pending_tree; node = (&tree->root)->rb_node; while (node) { pr = rb_entry(node, struct pending_reservation, rb_node); if (lclu < pr->lclu) node = node->rb_left; else if (lclu > pr->lclu) node = node->rb_right; else if (lclu == pr->lclu) return pr; } return NULL; } /* * __insert_pending - adds a pending cluster reservation to the set of * pending reservations * * @inode - file containing the cluster * @lblk - logical block in the cluster to be added * @prealloc - preallocated pending entry * * Returns 1 on successful insertion and -ENOMEM on failure. If the * pending reservation is already in the set, returns successfully. */ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk, struct pending_reservation **prealloc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; struct rb_node **p = &tree->root.rb_node; struct rb_node *parent = NULL; struct pending_reservation *pr; ext4_lblk_t lclu; int ret = 0; lclu = EXT4_B2C(sbi, lblk); /* search to find parent for insertion */ while (*p) { parent = *p; pr = rb_entry(parent, struct pending_reservation, rb_node); if (lclu < pr->lclu) { p = &(*p)->rb_left; } else if (lclu > pr->lclu) { p = &(*p)->rb_right; } else { /* pending reservation already inserted */ goto out; } } if (likely(*prealloc == NULL)) { pr = __alloc_pending(false); if (!pr) { ret = -ENOMEM; goto out; } } else { pr = *prealloc; *prealloc = NULL; } pr->lclu = lclu; rb_link_node(&pr->rb_node, parent, p); rb_insert_color(&pr->rb_node, &tree->root); ret = 1; out: return ret; } /* * __remove_pending - removes a pending cluster reservation from the set * of pending reservations * * @inode - file containing the cluster * @lblk - logical block in the pending cluster reservation to be removed * * Returns successfully if pending reservation is not a member of the set. */ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct pending_reservation *pr; struct ext4_pending_tree *tree; pr = __get_pending(inode, EXT4_B2C(sbi, lblk)); if (pr != NULL) { tree = &EXT4_I(inode)->i_pending_tree; rb_erase(&pr->rb_node, &tree->root); __free_pending(pr); } } /* * ext4_remove_pending - removes a pending cluster reservation from the set * of pending reservations * * @inode - file containing the cluster * @lblk - logical block in the pending cluster reservation to be removed * * Locking for external use of __remove_pending. */ void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk) { struct ext4_inode_info *ei = EXT4_I(inode); write_lock(&ei->i_es_lock); __remove_pending(inode, lblk); write_unlock(&ei->i_es_lock); } /* * ext4_is_pending - determine whether a cluster has a pending reservation * on it * * @inode - file containing the cluster * @lblk - logical block in the cluster * * Returns true if there's a pending reservation for the cluster in the * set of pending reservations, and false if not. 
*/ bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); bool ret; read_lock(&ei->i_es_lock); ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL); read_unlock(&ei->i_es_lock); return ret; } /* * ext4_es_insert_delayed_extent - adds some delayed blocks to the extents * status tree, adding a pending reservation * where needed * * @inode - file containing the newly added block * @lblk - start logical block to be added * @len - length of blocks to be added * @lclu_allocated/end_allocated - indicates whether a physical cluster has * been allocated for the logical cluster * that contains the start/end block. Note that * end_allocated should always be set to false * if the start and the end block are in the * same cluster */ void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, bool lclu_allocated, bool end_allocated) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct extent_status newes; ext4_lblk_t end = lblk + len - 1; int err1 = 0, err2 = 0, err3 = 0; struct extent_status *es1 = NULL; struct extent_status *es2 = NULL; struct pending_reservation *pr1 = NULL; struct pending_reservation *pr2 = NULL; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; es_debug("add [%u/%u) delayed to extent status tree of inode %lu\n", lblk, len, inode->i_ino); if (!len) return; WARN_ON_ONCE((EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) && end_allocated); newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED); trace_ext4_es_insert_delayed_extent(inode, &newes, lclu_allocated, end_allocated); ext4_es_insert_extent_check(inode, &newes); retry: if (err1 && !es1) es1 = __es_alloc_extent(true); if ((err1 || err2) && !es2) es2 = __es_alloc_extent(true); if (err1 || err2 || err3 < 0) { if (lclu_allocated && !pr1) pr1 = __alloc_pending(true); if (end_allocated && !pr2) pr2 = __alloc_pending(true); } write_lock(&EXT4_I(inode)->i_es_lock); err1 = __es_remove_extent(inode, lblk, end, NULL, es1); if (err1 != 0) goto error; /* Free preallocated extent if it didn't get used. */ if (es1) { if (!es1->es_len) __es_free_extent(es1); es1 = NULL; } err2 = __es_insert_extent(inode, &newes, es2); if (err2 != 0) goto error; /* Free preallocated extent if it didn't get used. */ if (es2) { if (!es2->es_len) __es_free_extent(es2); es2 = NULL; } if (lclu_allocated) { err3 = __insert_pending(inode, lblk, &pr1); if (err3 < 0) goto error; if (pr1) { __free_pending(pr1); pr1 = NULL; } } if (end_allocated) { err3 = __insert_pending(inode, end, &pr2); if (err3 < 0) goto error; if (pr2) { __free_pending(pr2); pr2 = NULL; } } error: write_unlock(&EXT4_I(inode)->i_es_lock); if (err1 || err2 || err3 < 0) goto retry; ext4_es_print_tree(inode); ext4_print_pending_tree(inode); return; } /* * __revise_pending - makes, cancels, or leaves unchanged pending cluster * reservations for a specified block range depending * upon the presence or absence of delayed blocks * outside the range within clusters at the ends of the * range * * @inode - file containing the range * @lblk - logical block defining the start of range * @len - length of range in blocks * @prealloc - preallocated pending entry * * Used after a newly allocated extent is added to the extents status tree. * Requires that the extents in the range have either written or unwritten * status. Must be called while holding i_es_lock. 
Returns the number of pending cluster reservations added (zero when * nothing needed to be inserted), or -ENOMEM on failure. */ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, struct pending_reservation **prealloc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t end = lblk + len - 1; ext4_lblk_t first, last; bool f_del = false, l_del = false; int pendings = 0; int ret = 0; if (len == 0) return 0; /* * Two cases - block range within single cluster and block range * spanning two or more clusters. Note that a cluster belonging * to a range starting and/or ending on a cluster boundary is treated * as if it does not contain a delayed extent. The new range may * have allocated space for previously delayed blocks out to the * cluster boundary, requiring that any pre-existing pending * reservation be canceled. Because this code only looks at blocks * outside the range, it should revise pending reservations * correctly even if the extent represented by the range can't be * inserted in the extents status tree due to ENOSPC. */ if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) { first = EXT4_LBLK_CMASK(sbi, lblk); if (first != lblk) f_del = __es_scan_range(inode, &ext4_es_is_delayed, first, lblk - 1); if (f_del) { ret = __insert_pending(inode, first, prealloc); if (ret < 0) goto out; pendings += ret; } else { last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1; if (last != end) l_del = __es_scan_range(inode, &ext4_es_is_delayed, end + 1, last); if (l_del) { ret = __insert_pending(inode, last, prealloc); if (ret < 0) goto out; pendings += ret; } else __remove_pending(inode, last); } } else { first = EXT4_LBLK_CMASK(sbi, lblk); if (first != lblk) f_del = __es_scan_range(inode, &ext4_es_is_delayed, first, lblk - 1); if (f_del) { ret = __insert_pending(inode, first, prealloc); if (ret < 0) goto out; pendings += ret; } else __remove_pending(inode, first); last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1; if (last != end) l_del = __es_scan_range(inode, &ext4_es_is_delayed, end + 1, last); if (l_del) { ret = __insert_pending(inode, last, prealloc); if (ret < 0) goto out; pendings += ret; } else __remove_pending(inode, last); } out: return (ret < 0) ? ret : pendings; }
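/*
 * Illustrative, stand-alone user-space sketch (not part of the ext4 code
 * above): the cluster-boundary arithmetic that __revise_pending() builds on
 * reduces to shift/mask operations when the cluster ratio is a power of two,
 * as with bigalloc.  Helper names, the cluster size, and the example range
 * below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t lblk_t;

/* block -> cluster index, the same shift EXT4_B2C() performs */
static lblk_t blk_to_cluster(lblk_t blk, unsigned int cluster_bits)
{
	return blk >> cluster_bits;
}

/* round a block down to its cluster start, as EXT4_LBLK_CMASK() does */
static lblk_t cluster_start(lblk_t blk, unsigned int ratio)
{
	return blk & ~(lblk_t)(ratio - 1);
}

int main(void)
{
	unsigned int cluster_bits = 4;           /* 16 blocks per cluster */
	unsigned int ratio = 1U << cluster_bits;
	lblk_t lblk = 35, len = 20;              /* example range [35, 54] */
	lblk_t end = lblk + len - 1;
	lblk_t first = cluster_start(lblk, ratio);            /* 32 */
	lblk_t last = cluster_start(end, ratio) + ratio - 1;  /* 63 */

	if (blk_to_cluster(lblk, cluster_bits) ==
	    blk_to_cluster(end, cluster_bits)) {
		printf("range fits in one cluster\n");
	} else {
		/* blocks outside the range but inside its end clusters */
		printf("scan [%u, %u] and [%u, %u] for delayed blocks\n",
		       (unsigned int)first, (unsigned int)(lblk - 1),
		       (unsigned int)(end + 1), (unsigned int)last);
	}
	return 0;
}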
#ifndef __NET_SCHED_CODEL_IMPL_H
#define __NET_SCHED_CODEL_IMPL_H

/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
* */ /* Controlling Queue Delay (CoDel) algorithm * ========================================= * Source : Kathleen Nichols and Van Jacobson * http://queue.acm.org/detail.cfm?id=2209336 * * Implemented on linux by Dave Taht and Eric Dumazet */ #include <net/inet_ecn.h> static void codel_params_init(struct codel_params *params) { params->interval = MS2TIME(100); params->target = MS2TIME(5); params->ce_threshold = CODEL_DISABLED_THRESHOLD; params->ce_threshold_mask = 0; params->ce_threshold_selector = 0; params->ecn = false; } static void codel_vars_init(struct codel_vars *vars) { memset(vars, 0, sizeof(*vars)); } static void codel_stats_init(struct codel_stats *stats) { stats->maxpacket = 0; } /* * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) * * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 */ static void codel_Newton_step(struct codel_vars *vars) { u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); val >>= 2; /* avoid overflow in following multiply */ val = (val * invsqrt) >> (32 - 2 + 1); vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; } /* * CoDel control_law is t + interval/sqrt(count) * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid * both sqrt() and divide operation. */ static codel_time_t codel_control_law(codel_time_t t, codel_time_t interval, u32 rec_inv_sqrt) { return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); } static bool codel_should_drop(const struct sk_buff *skb, void *ctx, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, u32 *backlog, codel_time_t now) { bool ok_to_drop; u32 skb_len; if (!skb) { vars->first_above_time = 0; return false; } skb_len = skb_len_func(skb); vars->ldelay = now - skb_time_func(skb); if (unlikely(skb_len > stats->maxpacket)) stats->maxpacket = skb_len; if (codel_time_before(vars->ldelay, params->target) || *backlog <= params->mtu) { /* went below - stay below for at least interval */ vars->first_above_time = 0; return false; } ok_to_drop = false; if (vars->first_above_time == 0) { /* just went above from below. If we stay above * for at least interval we'll say it's ok to drop */ vars->first_above_time = now + params->interval; } else if (codel_time_after(now, vars->first_above_time)) { ok_to_drop = true; } return ok_to_drop; } static struct sk_buff *codel_dequeue(void *ctx, u32 *backlog, struct codel_params *params, struct codel_vars *vars, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, codel_skb_drop_t drop_func, codel_skb_dequeue_t dequeue_func) { struct sk_buff *skb = dequeue_func(vars, ctx); codel_time_t now; bool drop; if (!skb) { vars->dropping = false; return skb; } now = codel_get_time(); drop = codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now); if (vars->dropping) { if (!drop) { /* sojourn time below target - leave dropping state */ vars->dropping = false; } else if (codel_time_after_eq(now, vars->drop_next)) { /* It's time for the next drop. Drop the current * packet and dequeue the next. The dequeue might * take us out of dropping state. * If not, schedule the next drop. 
* A large backlog might result in drop rates so high * that the next drop should happen now, * hence the while loop. */ while (vars->dropping && codel_time_after_eq(now, vars->drop_next)) { vars->count++; /* dont care of possible wrap * since there is no more divide */ codel_Newton_step(vars); if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; vars->drop_next = codel_control_law(vars->drop_next, params->interval, vars->rec_inv_sqrt); goto end; } stats->drop_len += skb_len_func(skb); drop_func(skb, ctx); stats->drop_count++; skb = dequeue_func(vars, ctx); if (!codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now)) { /* leave dropping state */ vars->dropping = false; } else { /* and schedule the next drop */ vars->drop_next = codel_control_law(vars->drop_next, params->interval, vars->rec_inv_sqrt); } } } } else if (drop) { u32 delta; if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { stats->drop_len += skb_len_func(skb); drop_func(skb, ctx); stats->drop_count++; skb = dequeue_func(vars, ctx); drop = codel_should_drop(skb, ctx, vars, params, stats, skb_len_func, skb_time_func, backlog, now); } vars->dropping = true; /* if min went above target close to when we last went below it * assume that the drop rate that controlled the queue on the * last cycle is a good starting point to control it now. */ delta = vars->count - vars->lastcount; if (delta > 1 && codel_time_before(now - vars->drop_next, 16 * params->interval)) { vars->count = delta; /* we dont care if rec_inv_sqrt approximation * is not very precise : * Next Newton steps will correct it quadratically. */ codel_Newton_step(vars); } else { vars->count = 1; vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; } vars->lastcount = vars->count; vars->drop_next = codel_control_law(now, params->interval, vars->rec_inv_sqrt); } end: if (skb && codel_time_after(vars->ldelay, params->ce_threshold)) { bool set_ce = true; if (params->ce_threshold_mask) { int dsfield = skb_get_dsfield(skb); set_ce = (dsfield >= 0 && (((u8)dsfield & params->ce_threshold_mask) == params->ce_threshold_selector)); } if (set_ce && INET_ECN_set_ce(skb)) stats->ce_mark++; } return skb; } #endif |
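/*
 * Illustrative, stand-alone user-space sketch (not part of the header
 * above): the Q0.32 Newton-Raphson update that codel_Newton_step() applies
 * so 1/sqrt(count) is tracked without sqrt() or division.  One step per
 * count change is enough because count moves by one drop at a time.  The
 * loop below only demonstrates convergence; names are hypothetical.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2), all in Q0.32 */
static uint32_t newton_step(uint32_t invsqrt, uint32_t count)
{
	uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
	uint64_t val = (3ULL << 32) - ((uint64_t)count * invsqrt2);

	val >>= 2;                      /* keep the next multiply in range */
	val = (val * invsqrt) >> (32 - 2 + 1);
	return (uint32_t)val;
}

int main(void)
{
	uint32_t inv = ~0U;             /* ~1.0 in Q0.32, exact for count 1 */
	uint32_t count;

	for (count = 2; count <= 10; count++) {
		inv = newton_step(inv, count);
		printf("count=%2u approx=%.4f exact=%.4f\n",
		       count, inv / 4294967296.0, 1.0 / sqrt((double)count));
	}
	return 0;                       /* build with -lm for sqrt() */
}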
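/*
 * Illustrative, stand-alone user-space sketch (not part of the header
 * above): codel_control_law() schedules the next drop at
 * t + interval/sqrt(count) by multiplying the interval with the Q0.32
 * reciprocal square root (a multiply-high, the reciprocal_scale() idea),
 * so the inter-drop gap shrinks as the drop count grows.  The exact
 * reciprocal below is computed with sqrt() purely for demonstration.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* interval * inv_sqrt, where inv_sqrt is a Q0.32 fraction */
static uint32_t scale_by_q32(uint32_t interval, uint32_t inv_sqrt_q32)
{
	return (uint32_t)(((uint64_t)interval * inv_sqrt_q32) >> 32);
}

int main(void)
{
	uint32_t interval_us = 100 * 1000;   /* CoDel's default 100 ms interval */
	uint32_t count;

	for (count = 1; count <= 8; count++) {
		uint32_t inv = (count == 1) ? ~0U
			: (uint32_t)(4294967296.0 / sqrt((double)count));

		printf("count=%u  gap to next drop = %u us\n",
		       count, scale_by_q32(interval_us, inv));
	}
	return 0;                            /* build with -lm for sqrt() */
}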
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
*/ #include <linux/compat.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/sched/rt.h> #include <linux/syscalls.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/personality.h> /* for STICKY_TIMEOUTS */ #include <linux/file.h> #include <linux/fdtable.h> #include <linux/fs.h> #include <linux/rcupdate.h> #include <linux/hrtimer.h> #include <linux/freezer.h> #include <net/busy_poll.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> /* * Estimate expected accuracy in ns from a timeval. * * After quite a bit of churning around, we've settled on * a simple thing of taking 0.1% of the timeout as the * slack, with a cap of 100 msec. * "nice" tasks get a 0.5% slack instead. * * Consider this comment an open invitation to come up with even * better solutions.. */ #define MAX_SLACK (100 * NSEC_PER_MSEC) static long __estimate_accuracy(struct timespec64 *tv) { long slack; int divfactor = 1000; if (tv->tv_sec < 0) return 0; if (task_nice(current) > 0) divfactor = divfactor / 5; if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor)) return MAX_SLACK; slack = tv->tv_nsec / divfactor; slack += tv->tv_sec * (NSEC_PER_SEC/divfactor); if (slack > MAX_SLACK) return MAX_SLACK; return slack; } u64 select_estimate_accuracy(struct timespec64 *tv) { u64 ret; struct timespec64 now; u64 slack = current->timer_slack_ns; if (slack == 0) return 0; ktime_get_ts64(&now); now = timespec64_sub(*tv, now); ret = __estimate_accuracy(&now); if (ret < slack) return slack; return ret; } struct poll_table_page { struct poll_table_page * next; struct poll_table_entry * entry; struct poll_table_entry entries[]; }; #define POLL_TABLE_FULL(table) \ ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table)) /* * Ok, Peter made a complicated, but straightforward multiple_wait() function. * I have rewritten this, taking some shortcuts: This code may not be easy to * follow, but it should be free of race-conditions, and it's practical. If you * understand what I'm doing here, then you understand how the linux * sleep/wakeup mechanism works. * * Two very simple procedures, poll_wait() and poll_freewait() make all the * work. poll_wait() is an inline-function defined in <linux/poll.h>, * as all select/poll functions have to call it to add an entry to the * poll table. 
*/ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p); void poll_initwait(struct poll_wqueues *pwq) { init_poll_funcptr(&pwq->pt, __pollwait); pwq->polling_task = current; pwq->triggered = 0; pwq->error = 0; pwq->table = NULL; pwq->inline_index = 0; } EXPORT_SYMBOL(poll_initwait); static void free_poll_entry(struct poll_table_entry *entry) { remove_wait_queue(entry->wait_address, &entry->wait); fput(entry->filp); } void poll_freewait(struct poll_wqueues *pwq) { struct poll_table_page * p = pwq->table; int i; for (i = 0; i < pwq->inline_index; i++) free_poll_entry(pwq->inline_entries + i); while (p) { struct poll_table_entry * entry; struct poll_table_page *old; entry = p->entry; do { entry--; free_poll_entry(entry); } while (entry > p->entries); old = p; p = p->next; free_page((unsigned long) old); } } EXPORT_SYMBOL(poll_freewait); static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p) { struct poll_table_page *table = p->table; if (p->inline_index < N_INLINE_POLL_ENTRIES) return p->inline_entries + p->inline_index++; if (!table || POLL_TABLE_FULL(table)) { struct poll_table_page *new_table; new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL); if (!new_table) { p->error = -ENOMEM; return NULL; } new_table->entry = new_table->entries; new_table->next = table; p->table = new_table; table = new_table; } return table->entry++; } static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct poll_wqueues *pwq = wait->private; DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task); /* * Although this function is called under waitqueue lock, LOCK * doesn't imply write barrier and the users expect write * barrier semantics on wakeup functions. The following * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up() * and is paired with smp_store_mb() in poll_schedule_timeout. */ smp_wmb(); pwq->triggered = 1; /* * Perform the default wake up operation using a dummy * waitqueue. * * TODO: This is hacky but there currently is no interface to * pass in @sync. @sync is scheduled to be removed and once * that happens, wake_up_process() can be used directly. */ return default_wake_function(&dummy_wait, mode, sync, key); } static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct poll_table_entry *entry; entry = container_of(wait, struct poll_table_entry, wait); if (key && !(key_to_poll(key) & entry->key)) return 0; return __pollwake(wait, mode, sync, key); } /* Add a new entry */ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) { struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt); struct poll_table_entry *entry = poll_get_entry(pwq); if (!entry) return; entry->filp = get_file(filp); entry->wait_address = wait_address; entry->key = p->_key; init_waitqueue_func_entry(&entry->wait, pollwake); entry->wait.private = pwq; add_wait_queue(wait_address, &entry->wait); } static int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack) { int rc = -EINTR; set_current_state(state); if (!pwq->triggered) rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS); __set_current_state(TASK_RUNNING); /* * Prepare for the next iteration. * * The following smp_store_mb() serves two purposes. First, it's * the counterpart rmb of the wmb in pollwake() such that data * written before wake up is always visible after wake up. 
* Second, the full barrier guarantees that triggered clearing * doesn't pass event check of the next iteration. Note that * this problem doesn't exist for the first iteration as * add_wait_queue() has full barrier semantics. */ smp_store_mb(pwq->triggered, 0); return rc; } /** * poll_select_set_timeout - helper function to setup the timeout value * @to: pointer to timespec64 variable for the final timeout * @sec: seconds (from user space) * @nsec: nanoseconds (from user space) * * Note, we do not use a timespec for the user space value here, That * way we can use the function for timeval and compat interfaces as well. * * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0. */ int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec) { struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec}; if (!timespec64_valid(&ts)) return -EINVAL; /* Optimize for the zero timeout value here */ if (!sec && !nsec) { to->tv_sec = to->tv_nsec = 0; } else { ktime_get_ts64(to); *to = timespec64_add_safe(*to, ts); } return 0; } enum poll_time_type { PT_TIMEVAL = 0, PT_OLD_TIMEVAL = 1, PT_TIMESPEC = 2, PT_OLD_TIMESPEC = 3, }; static int poll_select_finish(struct timespec64 *end_time, void __user *p, enum poll_time_type pt_type, int ret) { struct timespec64 rts; restore_saved_sigmask_unless(ret == -ERESTARTNOHAND); if (!p) return ret; if (current->personality & STICKY_TIMEOUTS) goto sticky; /* No update for zero timeout */ if (!end_time->tv_sec && !end_time->tv_nsec) return ret; ktime_get_ts64(&rts); rts = timespec64_sub(*end_time, rts); if (rts.tv_sec < 0) rts.tv_sec = rts.tv_nsec = 0; switch (pt_type) { case PT_TIMEVAL: { struct __kernel_old_timeval rtv; if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec)) memset(&rtv, 0, sizeof(rtv)); rtv.tv_sec = rts.tv_sec; rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC; if (!copy_to_user(p, &rtv, sizeof(rtv))) return ret; } break; case PT_OLD_TIMEVAL: { struct old_timeval32 rtv; rtv.tv_sec = rts.tv_sec; rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC; if (!copy_to_user(p, &rtv, sizeof(rtv))) return ret; } break; case PT_TIMESPEC: if (!put_timespec64(&rts, p)) return ret; break; case PT_OLD_TIMESPEC: if (!put_old_timespec32(&rts, p)) return ret; break; default: BUG(); } /* * If an application puts its timeval in read-only memory, we * don't want the Linux-specific update to the timeval to * cause a fault after the select has completed * successfully. However, because we're not updating the * timeval, we can't restart the system call. */ sticky: if (ret == -ERESTARTNOHAND) ret = -EINTR; return ret; } /* * Scalable version of the fd_set. */ typedef struct { unsigned long *in, *out, *ex; unsigned long *res_in, *res_out, *res_ex; } fd_set_bits; /* * How many longwords for "nr" bits? */ #define FDS_BITPERLONG (8*sizeof(long)) #define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG) #define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long)) /* * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned. */ static inline int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) { nr = FDS_BYTES(nr); if (ufdset) return copy_from_user(fdset, ufdset, nr) ? 
-EFAULT : 0; memset(fdset, 0, nr); return 0; } static inline unsigned long __must_check set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) { if (ufdset) return __copy_to_user(ufdset, fdset, FDS_BYTES(nr)); return 0; } static inline void zero_fd_set(unsigned long nr, unsigned long *fdset) { memset(fdset, 0, FDS_BYTES(nr)); } #define FDS_IN(fds, n) (fds->in + n) #define FDS_OUT(fds, n) (fds->out + n) #define FDS_EX(fds, n) (fds->ex + n) #define BITS(fds, n) (*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n)) static int max_select_fd(unsigned long n, fd_set_bits *fds) { unsigned long *open_fds; unsigned long set; int max; struct fdtable *fdt; /* handle last in-complete long-word first */ set = ~(~0UL << (n & (BITS_PER_LONG-1))); n /= BITS_PER_LONG; fdt = files_fdtable(current->files); open_fds = fdt->open_fds + n; max = 0; if (set) { set &= BITS(fds, n); if (set) { if (!(set & ~*open_fds)) goto get_max; return -EBADF; } } while (n) { open_fds--; n--; set = BITS(fds, n); if (!set) continue; if (set & ~*open_fds) return -EBADF; if (max) continue; get_max: do { max++; set >>= 1; } while (set); max += n * BITS_PER_LONG; } return max; } #define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\ EPOLLNVAL) #define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\ EPOLLNVAL) #define POLLEX_SET (EPOLLPRI | EPOLLNVAL) static inline __poll_t select_poll_one(int fd, poll_table *wait, unsigned long in, unsigned long out, unsigned long bit, __poll_t ll_flag) { CLASS(fd, f)(fd); if (fd_empty(f)) return EPOLLNVAL; wait->_key = POLLEX_SET | ll_flag; if (in & bit) wait->_key |= POLLIN_SET; if (out & bit) wait->_key |= POLLOUT_SET; return vfs_poll(fd_file(f), wait); } static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) { ktime_t expire, *to = NULL; struct poll_wqueues table; poll_table *wait; int retval, i, timed_out = 0; u64 slack = 0; __poll_t busy_flag = net_busy_loop_on() ? 
POLL_BUSY_LOOP : 0; unsigned long busy_start = 0; rcu_read_lock(); retval = max_select_fd(n, fds); rcu_read_unlock(); if (retval < 0) return retval; n = retval; poll_initwait(&table); wait = &table.pt; if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { wait->_qproc = NULL; timed_out = 1; } if (end_time && !timed_out) slack = select_estimate_accuracy(end_time); retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; bool can_busy_loop = false; inp = fds->in; outp = fds->out; exp = fds->ex; rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex; for (i = 0; i < n; ++rinp, ++routp, ++rexp) { unsigned long in, out, ex, all_bits, bit = 1, j; unsigned long res_in = 0, res_out = 0, res_ex = 0; __poll_t mask; in = *inp++; out = *outp++; ex = *exp++; all_bits = in | out | ex; if (all_bits == 0) { i += BITS_PER_LONG; continue; } for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) { if (i >= n) break; if (!(bit & all_bits)) continue; mask = select_poll_one(i, wait, in, out, bit, busy_flag); if ((mask & POLLIN_SET) && (in & bit)) { res_in |= bit; retval++; wait->_qproc = NULL; } if ((mask & POLLOUT_SET) && (out & bit)) { res_out |= bit; retval++; wait->_qproc = NULL; } if ((mask & POLLEX_SET) && (ex & bit)) { res_ex |= bit; retval++; wait->_qproc = NULL; } /* got something, stop busy polling */ if (retval) { can_busy_loop = false; busy_flag = 0; /* * only remember a returned * POLL_BUSY_LOOP if we asked for it */ } else if (busy_flag & mask) can_busy_loop = true; } if (res_in) *rinp = res_in; if (res_out) *routp = res_out; if (res_ex) *rexp = res_ex; cond_resched(); } wait->_qproc = NULL; if (retval || timed_out || signal_pending(current)) break; if (table.error) { retval = table.error; break; } /* only if found POLL_BUSY_LOOP sockets && not out of time */ if (can_busy_loop && !need_resched()) { if (!busy_start) { busy_start = busy_loop_current_time(); continue; } if (!busy_loop_timeout(busy_start)) continue; } busy_flag = 0; /* * If this is the first loop and we have a timeout * given, then we convert to ktime_t and set the to * pointer to the expiry value. */ if (end_time && !to) { expire = timespec64_to_ktime(*end_time); to = &expire; } if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE, to, slack)) timed_out = 1; } poll_freewait(&table); return retval; } /* * We can actually return ERESTARTSYS instead of EINTR, but I'd * like to be certain this leads to no problems. So I return * EINTR just for safety. * * Update: ERESTARTSYS breaks at least the xview clock binary, so * I'm trying ERESTARTNOHAND which restart only when you want to. */ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timespec64 *end_time) { fd_set_bits fds; void *bits; int ret, max_fds; size_t size, alloc_size; struct fdtable *fdt; /* Allocate small arguments on the stack to save memory and be faster */ long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; ret = -EINVAL; if (unlikely(n < 0)) goto out_nofds; /* max_fds can increase, so grab it once to avoid race */ rcu_read_lock(); fdt = files_fdtable(current->files); max_fds = fdt->max_fds; rcu_read_unlock(); if (n > max_fds) n = max_fds; /* * We need 6 bitmaps (in/out/ex for both incoming and outgoing), * since we used fdset we need to allocate memory in units of * long-words. 
*/ size = FDS_BYTES(n); bits = stack_fds; if (size > sizeof(stack_fds) / 6) { /* Not enough space in on-stack array; must use kmalloc */ ret = -ENOMEM; if (size > (SIZE_MAX / 6)) goto out_nofds; alloc_size = 6 * size; bits = kvmalloc(alloc_size, GFP_KERNEL); if (!bits) goto out_nofds; } fds.in = bits; fds.out = bits + size; fds.ex = bits + 2*size; fds.res_in = bits + 3*size; fds.res_out = bits + 4*size; fds.res_ex = bits + 5*size; if ((ret = get_fd_set(n, inp, fds.in)) || (ret = get_fd_set(n, outp, fds.out)) || (ret = get_fd_set(n, exp, fds.ex))) goto out; zero_fd_set(n, fds.res_in); zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_ex); ret = do_select(n, &fds, end_time); if (ret < 0) goto out; if (!ret) { ret = -ERESTARTNOHAND; if (signal_pending(current)) goto out; ret = 0; } if (set_fd_set(n, inp, fds.res_in) || set_fd_set(n, outp, fds.res_out) || set_fd_set(n, exp, fds.res_ex)) ret = -EFAULT; out: if (bits != stack_fds) kvfree(bits); out_nofds: return ret; } static int kern_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp) { struct timespec64 end_time, *to = NULL; struct __kernel_old_timeval tv; int ret; if (tvp) { if (copy_from_user(&tv, tvp, sizeof(tv))) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, tv.tv_sec + (tv.tv_usec / USEC_PER_SEC), (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC)) return -EINVAL; } ret = core_sys_select(n, inp, outp, exp, to); return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret); } SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp) { return kern_select(n, inp, outp, exp, tvp); } static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, void __user *tsp, const sigset_t __user *sigmask, size_t sigsetsize, enum poll_time_type type) { struct timespec64 ts, end_time, *to = NULL; int ret; if (tsp) { switch (type) { case PT_TIMESPEC: if (get_timespec64(&ts, tsp)) return -EFAULT; break; case PT_OLD_TIMESPEC: if (get_old_timespec32(&ts, tsp)) return -EFAULT; break; default: BUG(); } to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } ret = set_user_sigmask(sigmask, sigsetsize); if (ret) return ret; ret = core_sys_select(n, inp, outp, exp, to); return poll_select_finish(&end_time, tsp, type, ret); } /* * Most architectures can't handle 7-argument syscalls. So we provide a * 6-argument version where the sixth argument is a pointer to a structure * which has a pointer to the sigset_t itself followed by a size_t containing * the sigset size. 
*/ struct sigset_argpack { sigset_t __user *p; size_t size; }; static inline int get_sigset_argpack(struct sigset_argpack *to, struct sigset_argpack __user *from) { // the path is hot enough for overhead of copy_from_user() to matter if (from) { if (can_do_masked_user_access()) from = masked_user_access_begin(from); else if (!user_read_access_begin(from, sizeof(*from))) return -EFAULT; unsafe_get_user(to->p, &from->p, Efault); unsafe_get_user(to->size, &from->size, Efault); user_read_access_end(); } return 0; Efault: user_read_access_end(); return -EFAULT; } SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct __kernel_timespec __user *, tsp, void __user *, sig) { struct sigset_argpack x = {NULL, 0}; if (get_sigset_argpack(&x, sig)) return -EFAULT; return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC); } #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct old_timespec32 __user *, tsp, void __user *, sig) { struct sigset_argpack x = {NULL, 0}; if (get_sigset_argpack(&x, sig)) return -EFAULT; return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC); } #endif #ifdef __ARCH_WANT_SYS_OLD_SELECT struct sel_arg_struct { unsigned long n; fd_set __user *inp, *outp, *exp; struct __kernel_old_timeval __user *tvp; }; SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg) { struct sel_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp); } #endif struct poll_list { struct poll_list *next; unsigned int len; struct pollfd entries[] __counted_by(len); }; #define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd)) /* * Fish for pollable events on the pollfd->fd file descriptor. We're only * interested in events matching the pollfd->events mask, and the result * matching that mask is both recorded in pollfd->revents and returned. The * pwait poll_table will be used by the fd-provided poll handler for waiting, * if pwait->_qproc is non-NULL. */ static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait, bool *can_busy_poll, __poll_t busy_flag) { int fd = pollfd->fd; __poll_t mask, filter; if (unlikely(fd < 0)) return 0; CLASS(fd, f)(fd); if (fd_empty(f)) return EPOLLNVAL; /* userland u16 ->events contains POLL... bitmap */ filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP; pwait->_key = filter | busy_flag; mask = vfs_poll(fd_file(f), pwait); if (mask & busy_flag) *can_busy_poll = true; return mask & filter; /* Mask out unneeded events. */ } static int do_poll(struct poll_list *list, struct poll_wqueues *wait, struct timespec64 *end_time) { poll_table* pt = &wait->pt; ktime_t expire, *to = NULL; int timed_out = 0, count = 0; u64 slack = 0; __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0; unsigned long busy_start = 0; /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { pt->_qproc = NULL; timed_out = 1; } if (end_time && !timed_out) slack = select_estimate_accuracy(end_time); for (;;) { struct poll_list *walk; bool can_busy_loop = false; for (walk = list; walk != NULL; walk = walk->next) { struct pollfd * pfd, * pfd_end; pfd = walk->entries; pfd_end = pfd + walk->len; for (; pfd != pfd_end; pfd++) { __poll_t mask; /* * Fish for events. 
If we found one, record it * and kill poll_table->_qproc, so we don't * needlessly register any other waiters after * this. They'll get immediately deregistered * when we break out and return. */ mask = do_pollfd(pfd, pt, &can_busy_loop, busy_flag); pfd->revents = mangle_poll(mask); if (mask) { count++; pt->_qproc = NULL; /* found something, stop busy polling */ busy_flag = 0; can_busy_loop = false; } } } /* * All waiters have already been registered, so don't provide * a poll_table->_qproc to them on the next loop iteration. */ pt->_qproc = NULL; if (!count) { count = wait->error; if (signal_pending(current)) count = -ERESTARTNOHAND; } if (count || timed_out) break; /* only if found POLL_BUSY_LOOP sockets && not out of time */ if (can_busy_loop && !need_resched()) { if (!busy_start) { busy_start = busy_loop_current_time(); continue; } if (!busy_loop_timeout(busy_start)) continue; } busy_flag = 0; /* * If this is the first loop and we have a timeout * given, then we convert to ktime_t and set the to * pointer to the expiry value. */ if (end_time && !to) { expire = timespec64_to_ktime(*end_time); to = &expire; } if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack)) timed_out = 1; } return count; } #define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \ sizeof(struct pollfd)) static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, struct timespec64 *end_time) { struct poll_wqueues table; int err = -EFAULT, fdcount; /* Allocate small arguments on the stack to save memory and be faster - use long to make sure the buffer is aligned properly on 64 bit archs to avoid unaligned access */ long stack_pps[POLL_STACK_ALLOC/sizeof(long)]; struct poll_list *const head = (struct poll_list *)stack_pps; struct poll_list *walk = head; unsigned int todo = nfds; unsigned int len; if (nfds > rlimit(RLIMIT_NOFILE)) return -EINVAL; len = min_t(unsigned int, nfds, N_STACK_PPS); for (;;) { walk->next = NULL; walk->len = len; if (!len) break; if (copy_from_user(walk->entries, ufds + nfds-todo, sizeof(struct pollfd) * walk->len)) goto out_fds; if (walk->len >= todo) break; todo -= walk->len; len = min(todo, POLLFD_PER_PAGE); walk = walk->next = kmalloc(struct_size(walk, entries, len), GFP_KERNEL); if (!walk) { err = -ENOMEM; goto out_fds; } } poll_initwait(&table); fdcount = do_poll(head, &table, end_time); poll_freewait(&table); if (!user_write_access_begin(ufds, nfds * sizeof(*ufds))) goto out_fds; for (walk = head; walk; walk = walk->next) { struct pollfd *fds = walk->entries; unsigned int j; for (j = walk->len; j; fds++, ufds++, j--) unsafe_put_user(fds->revents, &ufds->revents, Efault); } user_write_access_end(); err = fdcount; out_fds: walk = head->next; while (walk) { struct poll_list *pos = walk; walk = walk->next; kfree(pos); } return err; Efault: user_write_access_end(); err = -EFAULT; goto out_fds; } static long do_restart_poll(struct restart_block *restart_block) { struct pollfd __user *ufds = restart_block->poll.ufds; int nfds = restart_block->poll.nfds; struct timespec64 *to = NULL, end_time; int ret; if (restart_block->poll.has_timeout) { end_time.tv_sec = restart_block->poll.tv_sec; end_time.tv_nsec = restart_block->poll.tv_nsec; to = &end_time; } ret = do_sys_poll(ufds, nfds, to); if (ret == -ERESTARTNOHAND) ret = set_restart_fn(restart_block, do_restart_poll); return ret; } SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout_msecs) { struct timespec64 end_time, *to = NULL; int ret; if (timeout_msecs >= 0) { to = &end_time; 
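/* split the millisecond timeout into a (seconds, nanoseconds) pair for poll_select_set_timeout(); a negative timeout_msecs means wait forever */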
poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC, NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC)); } ret = do_sys_poll(ufds, nfds, to); if (ret == -ERESTARTNOHAND) { struct restart_block *restart_block; restart_block = &current->restart_block; restart_block->poll.ufds = ufds; restart_block->poll.nfds = nfds; if (timeout_msecs >= 0) { restart_block->poll.tv_sec = end_time.tv_sec; restart_block->poll.tv_nsec = end_time.tv_nsec; restart_block->poll.has_timeout = 1; } else restart_block->poll.has_timeout = 0; ret = set_restart_fn(restart_block, do_restart_poll); } return ret; } SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask, size_t, sigsetsize) { struct timespec64 ts, end_time, *to = NULL; int ret; if (tsp) { if (get_timespec64(&ts, tsp)) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } ret = set_user_sigmask(sigmask, sigsetsize); if (ret) return ret; ret = do_sys_poll(ufds, nfds, to); return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret); } #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds, struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask, size_t, sigsetsize) { struct timespec64 ts, end_time, *to = NULL; int ret; if (tsp) { if (get_old_timespec32(&ts, tsp)) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } ret = set_user_sigmask(sigmask, sigsetsize); if (ret) return ret; ret = do_sys_poll(ufds, nfds, to); return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret); } #endif #ifdef CONFIG_COMPAT #define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t)) /* * Ooo, nasty. We need here to frob 32-bit unsigned longs to * 64-bit unsigned longs. */ static int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, unsigned long *fdset) { if (ufdset) { return compat_get_bitmap(fdset, ufdset, nr); } else { zero_fd_set(nr, fdset); return 0; } } static int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, unsigned long *fdset) { if (!ufdset) return 0; return compat_put_bitmap(ufdset, fdset, nr); } /* * This is a virtual copy of sys_select from fs/select.c and probably * should be compared to it from time to time */ /* * We can actually return ERESTARTSYS instead of EINTR, but I'd * like to be certain this leads to no problems. So I return * EINTR just for safety. * * Update: ERESTARTSYS breaks at least the xview clock binary, so * I'm trying ERESTARTNOHAND which restart only when you want to. */ static int compat_core_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct timespec64 *end_time) { fd_set_bits fds; void *bits; int size, max_fds, ret = -EINVAL; struct fdtable *fdt; long stack_fds[SELECT_STACK_ALLOC/sizeof(long)]; if (n < 0) goto out_nofds; /* max_fds can increase, so grab it once to avoid race */ rcu_read_lock(); fdt = files_fdtable(current->files); max_fds = fdt->max_fds; rcu_read_unlock(); if (n > max_fds) n = max_fds; /* * We need 6 bitmaps (in/out/ex for both incoming and outgoing), * since we used fdset we need to allocate memory in units of * long-words.
*/ size = FDS_BYTES(n); bits = stack_fds; if (size > sizeof(stack_fds) / 6) { bits = kmalloc_array(6, size, GFP_KERNEL); ret = -ENOMEM; if (!bits) goto out_nofds; } fds.in = (unsigned long *) bits; fds.out = (unsigned long *) (bits + size); fds.ex = (unsigned long *) (bits + 2*size); fds.res_in = (unsigned long *) (bits + 3*size); fds.res_out = (unsigned long *) (bits + 4*size); fds.res_ex = (unsigned long *) (bits + 5*size); if ((ret = compat_get_fd_set(n, inp, fds.in)) || (ret = compat_get_fd_set(n, outp, fds.out)) || (ret = compat_get_fd_set(n, exp, fds.ex))) goto out; zero_fd_set(n, fds.res_in); zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_ex); ret = do_select(n, &fds, end_time); if (ret < 0) goto out; if (!ret) { ret = -ERESTARTNOHAND; if (signal_pending(current)) goto out; ret = 0; } if (compat_set_fd_set(n, inp, fds.res_in) || compat_set_fd_set(n, outp, fds.res_out) || compat_set_fd_set(n, exp, fds.res_ex)) ret = -EFAULT; out: if (bits != stack_fds) kfree(bits); out_nofds: return ret; } static int do_compat_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct old_timeval32 __user *tvp) { struct timespec64 end_time, *to = NULL; struct old_timeval32 tv; int ret; if (tvp) { if (copy_from_user(&tv, tvp, sizeof(tv))) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, tv.tv_sec + (tv.tv_usec / USEC_PER_SEC), (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC)) return -EINVAL; } ret = compat_core_sys_select(n, inp, outp, exp, to); return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret); } COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, struct old_timeval32 __user *, tvp) { return do_compat_select(n, inp, outp, exp, tvp); } struct compat_sel_arg_struct { compat_ulong_t n; compat_uptr_t inp; compat_uptr_t outp; compat_uptr_t exp; compat_uptr_t tvp; }; COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg) { struct compat_sel_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), compat_ptr(a.exp), compat_ptr(a.tvp)); } static long do_compat_pselect(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, void __user *tsp, compat_sigset_t __user *sigmask, compat_size_t sigsetsize, enum poll_time_type type) { struct timespec64 ts, end_time, *to = NULL; int ret; if (tsp) { switch (type) { case PT_OLD_TIMESPEC: if (get_old_timespec32(&ts, tsp)) return -EFAULT; break; case PT_TIMESPEC: if (get_timespec64(&ts, tsp)) return -EFAULT; break; default: BUG(); } to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } ret = set_compat_user_sigmask(sigmask, sigsetsize); if (ret) return ret; ret = compat_core_sys_select(n, inp, outp, exp, to); return poll_select_finish(&end_time, tsp, type, ret); } struct compat_sigset_argpack { compat_uptr_t p; compat_size_t size; }; static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to, struct compat_sigset_argpack __user *from) { if (from) { if (!user_read_access_begin(from, sizeof(*from))) return -EFAULT; unsafe_get_user(to->p, &from->p, Efault); unsafe_get_user(to->size, &from->size, Efault); user_read_access_end(); } return 0; Efault: user_read_access_end(); return -EFAULT; } COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, struct 
__kernel_timespec __user *, tsp, void __user *, sig) { struct compat_sigset_argpack x = {0, 0}; if (get_compat_sigset_argpack(&x, sig)) return -EFAULT; return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p), x.size, PT_TIMESPEC); } #if defined(CONFIG_COMPAT_32BIT_TIME) COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, struct old_timespec32 __user *, tsp, void __user *, sig) { struct compat_sigset_argpack x = {0, 0}; if (get_compat_sigset_argpack(&x, sig)) return -EFAULT; return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p), x.size, PT_OLD_TIMESPEC); } #endif #if defined(CONFIG_COMPAT_32BIT_TIME) COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds, struct old_timespec32 __user *, tsp, const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) { struct timespec64 ts, end_time, *to = NULL; int ret; if (tsp) { if (get_old_timespec32(&ts, tsp)) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } ret = set_compat_user_sigmask(sigmask, sigsetsize); if (ret) return ret; ret = do_sys_poll(ufds, nfds, to); return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret); } #endif /* New compat syscall for 64 bit time_t*/ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds, unsigned int, nfds, struct __kernel_timespec __user *, tsp, const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) { struct timespec64 ts, end_time, *to = NULL; int ret; if (tsp) { if (get_timespec64(&ts, tsp)) return -EFAULT; to = &end_time; if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } ret = set_compat_user_sigmask(sigmask, sigsetsize); if (ret) return ret; ret = do_sys_poll(ufds, nfds, to); return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret); } #endif |
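/*
 * Illustrative, stand-alone user-space sketch (not part of fs/select.c
 * above): the timeout-slack heuristic that select_estimate_accuracy()
 * builds on, roughly 0.1% of the remaining timeout (0.5% for positively
 * niced tasks), capped at 100 ms and never below the task's
 * timer_slack_ns.  The function name and example inputs are hypothetical;
 * the kernel also special-cases a zero timer_slack_ns, omitted here.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000LL
#define NSEC_PER_MSEC 1000000LL
#define MAX_SLACK     (100 * NSEC_PER_MSEC)

static long long estimate_slack_ns(long long sec, long long nsec,
				   int task_nice, long long timer_slack_ns)
{
	long long divfactor = (task_nice > 0) ? 200 : 1000;
	long long slack;

	if (sec < 0)
		return 0;
	if (sec > MAX_SLACK / (NSEC_PER_SEC / divfactor))
		return MAX_SLACK;

	slack = nsec / divfactor + sec * (NSEC_PER_SEC / divfactor);
	if (slack > MAX_SLACK)
		slack = MAX_SLACK;
	return slack < timer_slack_ns ? timer_slack_ns : slack;
}

int main(void)
{
	/* 2.5 s timeout, default 50 us timer slack, not niced: 2.5 ms slack */
	printf("slack = %lld ns\n", estimate_slack_ns(2, 500000000LL, 0, 50000));
	/* 200 s timeout hits the 100 ms cap */
	printf("slack = %lld ns\n", estimate_slack_ns(200, 0, 0, 50000));
	return 0;
}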
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/media/radio/si470x/radio-si470x-usb.c
 *
* USB driver for radios with Silicon Labs Si470x FM Radio Receivers * * Copyright (c) 2009 Tobias Lorenz <tobias.lorenz@gmx.net> */ /* * ToDo: * - add firmware download/update support */ /* driver definitions */ #define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>" #define DRIVER_CARD "Silicon Labs Si470x FM Radio" #define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers" #define DRIVER_VERSION "1.0.10" /* kernel includes */ #include <linux/usb.h> #include <linux/hid.h> #include <linux/slab.h> #include "radio-si470x.h" /* USB Device ID List */ static const struct usb_device_id si470x_usb_driver_id_table[] = { /* Silicon Labs USB FM Radio Reference Design */ { USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, /* ADS/Tech FM Radio Receiver (formerly Instant FM Music) */ { USB_DEVICE_AND_INTERFACE_INFO(0x06e1, 0xa155, USB_CLASS_HID, 0, 0) }, /* KWorld USB FM Radio SnapMusic Mobile 700 (FM700) */ { USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) }, /* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */ { USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) }, /* Axentia ALERT FM USB Receiver */ { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(usb, si470x_usb_driver_id_table); /************************************************************************** * Module Parameters **************************************************************************/ /* Radio Nr */ static int radio_nr = -1; module_param(radio_nr, int, 0444); MODULE_PARM_DESC(radio_nr, "Radio Nr"); /* USB timeout */ static unsigned int usb_timeout = 500; module_param(usb_timeout, uint, 0644); MODULE_PARM_DESC(usb_timeout, "USB timeout (ms): *500*"); /* RDS buffer blocks */ static unsigned int rds_buf = 100; module_param(rds_buf, uint, 0444); MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*"); /* RDS maximum block errors */ static unsigned short max_rds_errors = 1; /* 0 means 0 errors requiring correction */ /* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */ /* 2 means 3-5 errors requiring correction */ /* 3 means 6+ errors or errors in checkword, correction not possible */ module_param(max_rds_errors, ushort, 0644); MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); /************************************************************************** * USB HID Reports **************************************************************************/ /* Reports 1-16 give direct read/write access to the 16 Si470x registers */ /* with the (REPORT_ID - 1) corresponding to the register address across USB */ /* endpoint 0 using GET_REPORT and SET_REPORT */ #define REGISTER_REPORT_SIZE (RADIO_REGISTER_SIZE + 1) #define REGISTER_REPORT(reg) ((reg) + 1) /* Report 17 gives direct read/write access to the entire Si470x register */ /* map across endpoint 0 using GET_REPORT and SET_REPORT */ #define ENTIRE_REPORT_SIZE (RADIO_REGISTER_NUM * RADIO_REGISTER_SIZE + 1) #define ENTIRE_REPORT 17 /* Report 18 is used to send the lowest 6 Si470x registers up the HID */ /* interrupt endpoint 1 to Windows every 20 milliseconds for status */ #define RDS_REPORT_SIZE (RDS_REGISTER_NUM * RADIO_REGISTER_SIZE + 1) #define RDS_REPORT 18 /* Report 19: LED state */ #define LED_REPORT_SIZE 3 #define LED_REPORT 19 /* Report 19: stream */ #define STREAM_REPORT_SIZE 3 #define STREAM_REPORT 19 /* Report 20: scratch */ #define SCRATCH_PAGE_SIZE 63 #define SCRATCH_REPORT_SIZE 
(SCRATCH_PAGE_SIZE + 1) #define SCRATCH_REPORT 20 /* Reports 19-22: flash upgrade of the C8051F321 */ #define WRITE_REPORT_SIZE 4 #define WRITE_REPORT 19 #define FLASH_REPORT_SIZE 64 #define FLASH_REPORT 20 #define CRC_REPORT_SIZE 3 #define CRC_REPORT 21 #define RESPONSE_REPORT_SIZE 2 #define RESPONSE_REPORT 22 /* Report 23: currently unused, but can accept 60 byte reports on the HID */ /* interrupt out endpoint 2 every 1 millisecond */ #define UNUSED_REPORT 23 #define MAX_REPORT_SIZE 64 /************************************************************************** * Software/Hardware Versions from Scratch Page **************************************************************************/ #define RADIO_HW_VERSION 1 /************************************************************************** * LED State Definitions **************************************************************************/ #define LED_COMMAND 0x35 #define NO_CHANGE_LED 0x00 #define ALL_COLOR_LED 0x01 /* streaming state */ #define BLINK_GREEN_LED 0x02 /* connect state */ #define BLINK_RED_LED 0x04 #define BLINK_ORANGE_LED 0x10 /* disconnect state */ #define SOLID_GREEN_LED 0x20 /* tuning/seeking state */ #define SOLID_RED_LED 0x40 /* bootload state */ #define SOLID_ORANGE_LED 0x80 /************************************************************************** * Stream State Definitions **************************************************************************/ #define STREAM_COMMAND 0x36 #define STREAM_VIDPID 0x00 #define STREAM_AUDIO 0xff /************************************************************************** * Bootloader / Flash Commands **************************************************************************/ /* unique id sent to bootloader and required to put into a bootload state */ #define UNIQUE_BL_ID 0x34 /* mask for the flash data */ #define FLASH_DATA_MASK 0x55 /* bootloader commands */ #define GET_SW_VERSION_COMMAND 0x00 #define SET_PAGE_COMMAND 0x01 #define ERASE_PAGE_COMMAND 0x02 #define WRITE_PAGE_COMMAND 0x03 #define CRC_ON_PAGE_COMMAND 0x04 #define READ_FLASH_BYTE_COMMAND 0x05 #define RESET_DEVICE_COMMAND 0x06 #define GET_HW_VERSION_COMMAND 0x07 #define BLANK 0xff /* bootloader command responses */ #define COMMAND_OK 0x01 #define COMMAND_FAILED 0x02 #define COMMAND_PENDING 0x03 /************************************************************************** * General Driver Functions - REGISTER_REPORTs **************************************************************************/ /* * si470x_get_report - receive a HID report */ static int si470x_get_report(struct si470x_device *radio, void *buf, int size) { unsigned char *report = buf; int retval; retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), HID_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, report[0], 2, buf, size, usb_timeout); if (retval < 0) dev_warn(&radio->intf->dev, "si470x_get_report: usb_control_msg returned %d\n", retval); return retval; } /* * si470x_set_report - send a HID report */ static int si470x_set_report(struct si470x_device *radio, void *buf, int size) { unsigned char *report = buf; int retval; retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), HID_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, report[0], 2, buf, size, usb_timeout); if (retval < 0) dev_warn(&radio->intf->dev, "si470x_set_report: usb_control_msg returned %d\n", retval); return retval; } /* * si470x_get_register - read register */ static int si470x_get_register(struct si470x_device *radio, int 
regnr) { int retval; radio->usb_buf[0] = REGISTER_REPORT(regnr); retval = si470x_get_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE); if (retval >= 0) radio->registers[regnr] = get_unaligned_be16(&radio->usb_buf[1]); return (retval < 0) ? -EINVAL : 0; } /* * si470x_set_register - write register */ static int si470x_set_register(struct si470x_device *radio, int regnr) { int retval; radio->usb_buf[0] = REGISTER_REPORT(regnr); put_unaligned_be16(radio->registers[regnr], &radio->usb_buf[1]); retval = si470x_set_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE); return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * General Driver Functions - ENTIRE_REPORT **************************************************************************/ /* * si470x_get_all_registers - read entire registers */ static int si470x_get_all_registers(struct si470x_device *radio) { int retval; unsigned char regnr; radio->usb_buf[0] = ENTIRE_REPORT; retval = si470x_get_report(radio, radio->usb_buf, ENTIRE_REPORT_SIZE); if (retval >= 0) for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++) radio->registers[regnr] = get_unaligned_be16( &radio->usb_buf[regnr * RADIO_REGISTER_SIZE + 1]); return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * General Driver Functions - LED_REPORT **************************************************************************/ /* * si470x_set_led_state - sets the led state */ static int si470x_set_led_state(struct si470x_device *radio, unsigned char led_state) { int retval; radio->usb_buf[0] = LED_REPORT; radio->usb_buf[1] = LED_COMMAND; radio->usb_buf[2] = led_state; retval = si470x_set_report(radio, radio->usb_buf, LED_REPORT_SIZE); return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * General Driver Functions - SCRATCH_REPORT **************************************************************************/ /* * si470x_get_scratch_versions - gets the scratch page and version infos */ static int si470x_get_scratch_page_versions(struct si470x_device *radio) { int retval; radio->usb_buf[0] = SCRATCH_REPORT; retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE); if (retval < 0) dev_warn(&radio->intf->dev, "si470x_get_scratch: si470x_get_report returned %d\n", retval); else { radio->software_version = radio->usb_buf[1]; radio->hardware_version = radio->usb_buf[2]; } return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * RDS Driver Functions **************************************************************************/ /* * si470x_int_in_callback - rds callback and processing function * * TODO: do we need to use mutex locks in some sections? */ static void si470x_int_in_callback(struct urb *urb) { struct si470x_device *radio = urb->context; int retval; unsigned char regnr; unsigned char blocknum; unsigned short bler; /* rds block errors */ unsigned short rds; unsigned char tmpbuf[3]; if (urb->status) { if (urb->status == -ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN) { return; } else { dev_warn(&radio->intf->dev, "non-zero urb status (%d)\n", urb->status); goto resubmit; /* Maybe we can recover. 
*/ } } /* Sometimes the device returns len 0 packets */ if (urb->actual_length != RDS_REPORT_SIZE) goto resubmit; radio->registers[STATUSRSSI] = get_unaligned_be16(&radio->int_in_buffer[1]); if (radio->registers[STATUSRSSI] & STATUSRSSI_STC) complete(&radio->completion); if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)) { /* Update RDS registers with URB data */ for (regnr = 1; regnr < RDS_REGISTER_NUM; regnr++) radio->registers[STATUSRSSI + regnr] = get_unaligned_be16(&radio->int_in_buffer[ regnr * RADIO_REGISTER_SIZE + 1]); /* get rds blocks */ if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0) { /* No RDS group ready, better luck next time */ goto resubmit; } if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSS) == 0) { /* RDS decoder not synchronized */ goto resubmit; } for (blocknum = 0; blocknum < 4; blocknum++) { switch (blocknum) { default: bler = (radio->registers[STATUSRSSI] & STATUSRSSI_BLERA) >> 9; rds = radio->registers[RDSA]; break; case 1: bler = (radio->registers[READCHAN] & READCHAN_BLERB) >> 14; rds = radio->registers[RDSB]; break; case 2: bler = (radio->registers[READCHAN] & READCHAN_BLERC) >> 12; rds = radio->registers[RDSC]; break; case 3: bler = (radio->registers[READCHAN] & READCHAN_BLERD) >> 10; rds = radio->registers[RDSD]; break; } /* Fill the V4L2 RDS buffer */ put_unaligned_le16(rds, &tmpbuf); tmpbuf[2] = blocknum; /* offset name */ tmpbuf[2] |= blocknum << 3; /* received offset */ if (bler > max_rds_errors) tmpbuf[2] |= 0x80; /* uncorrectable errors */ else if (bler > 0) tmpbuf[2] |= 0x40; /* corrected error(s) */ /* copy RDS block to internal buffer */ memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3); radio->wr_index += 3; /* wrap write pointer */ if (radio->wr_index >= radio->buf_size) radio->wr_index = 0; /* check for overflow */ if (radio->wr_index == radio->rd_index) { /* increment and wrap read pointer */ radio->rd_index += 3; if (radio->rd_index >= radio->buf_size) radio->rd_index = 0; } } if (radio->wr_index != radio->rd_index) wake_up_interruptible(&radio->read_queue); } resubmit: /* Resubmit if we're still running. 
*/ if (radio->int_in_running && radio->usbdev) { retval = usb_submit_urb(radio->int_in_urb, GFP_ATOMIC); if (retval) { dev_warn(&radio->intf->dev, "resubmitting urb failed (%d)", retval); radio->int_in_running = 0; } } radio->status_rssi_auto_update = radio->int_in_running; } static int si470x_fops_open(struct file *file) { return v4l2_fh_open(file); } static int si470x_fops_release(struct file *file) { return v4l2_fh_release(file); } static void si470x_usb_release(struct v4l2_device *v4l2_dev) { struct si470x_device *radio = container_of(v4l2_dev, struct si470x_device, v4l2_dev); usb_free_urb(radio->int_in_urb); v4l2_ctrl_handler_free(&radio->hdl); v4l2_device_unregister(&radio->v4l2_dev); kfree(radio->int_in_buffer); kfree(radio->buffer); kfree(radio->usb_buf); kfree(radio); } /************************************************************************** * Video4Linux Interface **************************************************************************/ /* * si470x_vidioc_querycap - query device capabilities */ static int si470x_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *capability) { struct si470x_device *radio = video_drvdata(file); strscpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); strscpy(capability->card, DRIVER_CARD, sizeof(capability->card)); usb_make_path(radio->usbdev, capability->bus_info, sizeof(capability->bus_info)); return 0; } static int si470x_start_usb(struct si470x_device *radio) { int retval; /* initialize interrupt urb */ usb_fill_int_urb(radio->int_in_urb, radio->usbdev, usb_rcvintpipe(radio->usbdev, radio->int_in_endpoint->bEndpointAddress), radio->int_in_buffer, le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize), si470x_int_in_callback, radio, radio->int_in_endpoint->bInterval); radio->int_in_running = 1; mb(); retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL); if (retval) { dev_info(&radio->intf->dev, "submitting int urb failed (%d)\n", retval); radio->int_in_running = 0; } radio->status_rssi_auto_update = radio->int_in_running; /* start radio */ retval = si470x_start(radio); if (retval < 0) return retval; v4l2_ctrl_handler_setup(&radio->hdl); return retval; } /************************************************************************** * USB Interface **************************************************************************/ /* * si470x_usb_driver_probe - probe for the device */ static int si470x_usb_driver_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct si470x_device *radio; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i, int_end_size, retval; unsigned char version_warning = 0; /* private data allocation and initialization */ radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL); if (!radio) { retval = -ENOMEM; goto err_initial; } radio->usb_buf = kmalloc(MAX_REPORT_SIZE, GFP_KERNEL); if (radio->usb_buf == NULL) { retval = -ENOMEM; goto err_radio; } radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; radio->band = 1; /* Default to 76 - 108 MHz */ mutex_init(&radio->lock); init_completion(&radio->completion); radio->get_register = si470x_get_register; radio->set_register = si470x_set_register; radio->fops_open = si470x_fops_open; radio->fops_release = si470x_fops_release; radio->vidioc_querycap = si470x_vidioc_querycap; iface_desc = intf->cur_altsetting; /* Set up interrupt endpoint information. 
*/ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) radio->int_in_endpoint = endpoint; } if (!radio->int_in_endpoint) { dev_info(&intf->dev, "could not find interrupt in endpoint\n"); retval = -EIO; goto err_usbbuf; } int_end_size = le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize); radio->int_in_buffer = kmalloc(int_end_size, GFP_KERNEL); if (!radio->int_in_buffer) { dev_info(&intf->dev, "could not allocate int_in_buffer"); retval = -ENOMEM; goto err_usbbuf; } radio->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!radio->int_in_urb) { retval = -ENOMEM; goto err_intbuffer; } radio->v4l2_dev.release = si470x_usb_release; /* * The si470x SiLabs reference design uses the same USB IDs as * 'Thanko's Raremono' si4734 based receiver. So check here which we * have: attempt to read the device ID from the si470x: the lower 12 * bits should be 0x0242 for the si470x. * * We use this check to determine which device we are dealing with. */ if (id->idVendor == 0x10c4 && id->idProduct == 0x818a) { retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), HID_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 1, 2, radio->usb_buf, 3, 500); if (retval != 3 || (get_unaligned_be16(&radio->usb_buf[1]) & 0xfff) != 0x0242) { dev_info(&intf->dev, "this is not a si470x device.\n"); retval = -ENODEV; goto err_urb; } } retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto err_urb; } v4l2_ctrl_handler_init(&radio->hdl, 2); v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1); v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 15, 1, 15); if (radio->hdl.error) { retval = radio->hdl.error; dev_err(&intf->dev, "couldn't register control\n"); goto err_dev; } radio->videodev = si470x_viddev_template; radio->videodev.ctrl_handler = &radio->hdl; radio->videodev.lock = &radio->lock; radio->videodev.v4l2_dev = &radio->v4l2_dev; radio->videodev.release = video_device_release_empty; radio->videodev.device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; video_set_drvdata(&radio->videodev, radio); /* get device and chip versions */ if (si470x_get_all_registers(radio) < 0) { retval = -EIO; goto err_ctrl; } dev_info(&intf->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n", radio->registers[DEVICEID], radio->registers[SI_CHIPID]); if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) { dev_warn(&intf->dev, "This driver is known to work with firmware version %u, but the device has firmware version %u.\n", RADIO_FW_VERSION, radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE); version_warning = 1; } /* get software and hardware versions */ if (si470x_get_scratch_page_versions(radio) < 0) { retval = -EIO; goto err_ctrl; } dev_info(&intf->dev, "software version %d, hardware version %d\n", radio->software_version, radio->hardware_version); if (radio->hardware_version < RADIO_HW_VERSION) { dev_warn(&intf->dev, "This driver is known to work with hardware version %u, but the device has hardware version %u.\n", RADIO_HW_VERSION, radio->hardware_version); version_warning = 1; } /* give out version warning */ if (version_warning == 1) { dev_warn(&intf->dev, "If you have some trouble using this driver, please report to V4L ML at linux-media@vger.kernel.org\n"); } /* set led to connect state */ 
si470x_set_led_state(radio, BLINK_GREEN_LED); /* rds buffer allocation */ radio->buf_size = rds_buf * 3; radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL); if (!radio->buffer) { retval = -EIO; goto err_ctrl; } /* rds buffer configuration */ radio->wr_index = 0; radio->rd_index = 0; init_waitqueue_head(&radio->read_queue); usb_set_intfdata(intf, radio); /* start radio */ retval = si470x_start_usb(radio); if (retval < 0 && !radio->int_in_running) goto err_buf; else if (retval < 0) /* in case of radio->int_in_running == 1 */ goto err_all; /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ /* register video device */ retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr); if (retval) { dev_err(&intf->dev, "Could not register video device\n"); goto err_all; } return 0; err_all: usb_kill_urb(radio->int_in_urb); err_buf: kfree(radio->buffer); err_ctrl: v4l2_ctrl_handler_free(&radio->hdl); err_dev: v4l2_device_unregister(&radio->v4l2_dev); err_urb: usb_free_urb(radio->int_in_urb); err_intbuffer: kfree(radio->int_in_buffer); err_usbbuf: kfree(radio->usb_buf); err_radio: kfree(radio); err_initial: return retval; } /* * si470x_usb_driver_suspend - suspend the device */ static int si470x_usb_driver_suspend(struct usb_interface *intf, pm_message_t message) { struct si470x_device *radio = usb_get_intfdata(intf); dev_info(&intf->dev, "suspending now...\n"); /* shutdown interrupt handler */ if (radio->int_in_running) { radio->int_in_running = 0; if (radio->int_in_urb) usb_kill_urb(radio->int_in_urb); } /* cancel read processes */ wake_up_interruptible(&radio->read_queue); /* stop radio */ si470x_stop(radio); return 0; } /* * si470x_usb_driver_resume - resume the device */ static int si470x_usb_driver_resume(struct usb_interface *intf) { struct si470x_device *radio = usb_get_intfdata(intf); int ret; dev_info(&intf->dev, "resuming now...\n"); /* start radio */ ret = si470x_start_usb(radio); if (ret == 0) v4l2_ctrl_handler_setup(&radio->hdl); return ret; } /* * si470x_usb_driver_disconnect - disconnect the device */ static void si470x_usb_driver_disconnect(struct usb_interface *intf) { struct si470x_device *radio = usb_get_intfdata(intf); mutex_lock(&radio->lock); v4l2_device_disconnect(&radio->v4l2_dev); video_unregister_device(&radio->videodev); usb_kill_urb(radio->int_in_urb); usb_set_intfdata(intf, NULL); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); } /* * si470x_usb_driver - usb driver interface * * A note on suspend/resume: this driver had only empty suspend/resume * functions, and when I tried to test suspend/resume it always disconnected * instead of resuming (using my ADS InstantFM stick). So I've decided to * remove these callbacks until someone else with better hardware can * implement and test this. */ static struct usb_driver si470x_usb_driver = { .name = DRIVER_NAME, .probe = si470x_usb_driver_probe, .disconnect = si470x_usb_driver_disconnect, .suspend = si470x_usb_driver_suspend, .resume = si470x_usb_driver_resume, .reset_resume = si470x_usb_driver_resume, .id_table = si470x_usb_driver_id_table, }; module_usb_driver(si470x_usb_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); |
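The register accessors above (si470x_get_register()/si470x_set_register()) move each 16-bit Si470x register through a small HID report: byte 0 carries the report ID, which is the register number plus one (REGISTER_REPORT(reg)), and the following two bytes carry the register value in big-endian order. Below is a minimal stand-alone sketch of that packing, assuming only the report layout visible in this file; the pack/unpack helper names are illustrative and not part of the driver.

#include <stdint.h>

/* Illustrative sketch only: mirrors the REGISTER_REPORT layout used by
 * si470x_get_register()/si470x_set_register() above. */
static void si470x_pack_register_report(uint8_t buf[3], unsigned int regnr, uint16_t val)
{
	buf[0] = (uint8_t)(regnr + 1);	/* REGISTER_REPORT(regnr) */
	buf[1] = (uint8_t)(val >> 8);	/* register value, big-endian */
	buf[2] = (uint8_t)(val & 0xff);
}

static uint16_t si470x_unpack_register_report(const uint8_t buf[3])
{
	/* the 16-bit value follows the one-byte report ID */
	return (uint16_t)((buf[1] << 8) | buf[2]);
}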
// SPDX-License-Identifier: GPL-2.0-or-later /* * (Tentative) USB Audio Driver for ALSA * * Mixer control part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Much code borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) */ /* * TODOs, for both the mixer and the streaming interfaces: * * - support for UAC2 effect units * - support for graphical equalizers * - RANGE and MEM set commands (UAC2) * - RANGE and MEM interrupt dispatchers (UAC2) * - audio channel clustering (UAC2) * - audio sample rate converter units (UAC2) * - proper handling of clock multipliers (UAC2) * - dispatch clock change notifications (UAC2) * - stop PCM streams which use a clock that became invalid * - stop PCM streams which use a clock selector that has changed * - parse available sample rates again when clock sources changed */ #include <linux/bitops.h> #include <linux/init.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <linux/usb/audio-v3.h> #include <sound/core.h> #include <sound/control.h> #include <sound/hwdep.h> #include <sound/info.h> #include <sound/tlv.h> #include "usbaudio.h" #include "mixer.h" #include "helper.h" #include "mixer_quirks.h" #include "power.h" #define MAX_ID_ELEMS 256 struct usb_audio_term { int id; int type; int channels; unsigned int chconfig; int name; }; struct usbmix_name_map; struct mixer_build { struct snd_usb_audio *chip; struct usb_mixer_interface *mixer; unsigned char *buffer; unsigned int buflen; DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS); struct usb_audio_term oterm; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; }; /* E-mu 0202/0404/0204 eXtension Unit (XU) control */ enum { USB_XU_CLOCK_RATE = 0xe301, USB_XU_CLOCK_SOURCE = 0xe302, USB_XU_DIGITAL_IO_STATUS = 0xe303, USB_XU_DEVICE_OPTIONS = 0xe304, USB_XU_DIRECT_MONITORING = 0xe305, USB_XU_METERING = 0xe306 }; enum { USB_XU_CLOCK_SOURCE_SELECTOR = 0x02, /* clock source */ USB_XU_CLOCK_RATE_SELECTOR = 0x03, /* clock rate */ USB_XU_DIGITAL_FORMAT_SELECTOR = 0x01, /* the spdif format */ USB_XU_SOFT_LIMIT_SELECTOR = 0x03 /* soft limiter */ }; /* * manual mapping of mixer names * if the mixer topology is too complicated and the parsed names are * ambiguous, add the entries in usbmixer_maps.c.
*/ #include "mixer_maps.c" static const struct usbmix_name_map * find_map(const struct usbmix_name_map *p, int unitid, int control) { if (!p) return NULL; for (; p->id; p++) { if (p->id == unitid && (!control || !p->control || control == p->control)) return p; } return NULL; } /* get the mapped name if the unit matches */ static int check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen) { int len; if (!p || !p->name) return 0; buflen--; len = strscpy(buf, p->name, buflen); return len < 0 ? buflen : len; } /* ignore the error value if ignore_ctl_error flag is set */ #define filter_error(cval, err) \ ((cval)->head.mixer->ignore_ctl_error ? 0 : (err)) /* check whether the control should be ignored */ static inline int check_ignored_ctl(const struct usbmix_name_map *p) { if (!p || p->name || p->dB) return 0; return 1; } /* dB mapping */ static inline void check_mapped_dB(const struct usbmix_name_map *p, struct usb_mixer_elem_info *cval) { if (p && p->dB) { cval->dBmin = p->dB->min; cval->dBmax = p->dB->max; cval->min_mute = p->dB->min_mute; cval->initialized = 1; } } /* get the mapped selector source name */ static int check_mapped_selector_name(struct mixer_build *state, int unitid, int index, char *buf, int buflen) { const struct usbmix_selector_map *p; int len; if (!state->selector_map) return 0; for (p = state->selector_map; p->id; p++) { if (p->id == unitid && index < p->count) { len = strscpy(buf, p->names[index], buflen); return len < 0 ? buflen : len; } } return 0; } /* * find an audio control unit with the given unit id */ static void *find_audio_control_unit(struct mixer_build *state, unsigned char unit) { /* we just parse the header */ struct uac_feature_unit_descriptor *hdr = NULL; while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr, USB_DT_CS_INTERFACE)) != NULL) { if (hdr->bLength >= 4 && hdr->bDescriptorSubtype >= UAC_INPUT_TERMINAL && hdr->bDescriptorSubtype <= UAC3_SAMPLE_RATE_CONVERTER && hdr->bUnitID == unit) return hdr; } return NULL; } /* * copy a string with the given id */ static int snd_usb_copy_string_desc(struct snd_usb_audio *chip, int index, char *buf, int maxlen) { int len = usb_string(chip->dev, index, buf, maxlen - 1); if (len < 0) return 0; buf[len] = 0; return len; } /* * convert from the byte/word on usb descriptor to the zero-based integer */ static int convert_signed_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_U8: val &= 0xff; break; case USB_MIXER_S8: val &= 0xff; if (val >= 0x80) val -= 0x100; break; case USB_MIXER_U16: val &= 0xffff; break; case USB_MIXER_S16: val &= 0xffff; if (val >= 0x8000) val -= 0x10000; break; } return val; } /* * convert from the zero-based int to the byte/word for usb descriptor */ static int convert_bytes_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_S8: case USB_MIXER_U8: return val & 0xff; case USB_MIXER_S16: case USB_MIXER_U16: return val & 0xffff; } return 0; /* not reached */ } static int get_relative_value(struct usb_mixer_elem_info *cval, int val) { if (!cval->res) cval->res = 1; if (val < cval->min) return 0; else if (val >= cval->max) return DIV_ROUND_UP(cval->max - cval->min, cval->res); else return (val - cval->min) / cval->res; } static int get_abs_value(struct usb_mixer_elem_info *cval, int val) { if (val < 0) return cval->min; if 
(!cval->res) cval->res = 1; val *= cval->res; val += cval->min; if (val > cval->max) return cval->max; return val; } static int uac2_ctl_value_size(int val_type) { switch (val_type) { case USB_MIXER_S32: case USB_MIXER_U32: return 4; case USB_MIXER_S16: case USB_MIXER_U16: return 2; default: return 1; } return 0; /* unreachable */ } /* * retrieve a mixer value */ static inline int mixer_ctrl_intf(struct usb_mixer_interface *mixer) { return get_iface_desc(mixer->hostif)->bInterfaceNumber; } static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->head.mixer->chip; unsigned char buf[2]; int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; int timeout = 10; int idx = 0, err; err = snd_usb_lock_shutdown(chip); if (err < 0) return -EIO; while (timeout-- > 0) { idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); err = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, buf, val_len); if (err >= val_len) { *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len)); err = 0; goto out; } else if (err == -ETIMEDOUT) { goto out; } } usb_audio_dbg(chip, "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", request, validx, idx, cval->val_type); err = -EINVAL; out: snd_usb_unlock_shutdown(chip); return err; } static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->head.mixer->chip; /* enough space for one range */ unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)]; unsigned char *val; int idx = 0, ret, val_size, size; __u8 bRequest; val_size = uac2_ctl_value_size(cval->val_type); if (request == UAC_GET_CUR) { bRequest = UAC2_CS_CUR; size = val_size; } else { bRequest = UAC2_CS_RANGE; size = sizeof(__u16) + 3 * val_size; } memset(buf, 0, sizeof(buf)); if (snd_usb_lock_shutdown(chip)) return -EIO; idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, buf, size); snd_usb_unlock_shutdown(chip); if (ret < 0) { usb_audio_dbg(chip, "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", request, validx, idx, cval->val_type); return ret; } /* FIXME: how should we handle multiple triplets here? */ switch (request) { case UAC_GET_CUR: val = buf; break; case UAC_GET_MIN: val = buf + sizeof(__u16); break; case UAC_GET_MAX: val = buf + sizeof(__u16) + val_size; break; case UAC_GET_RES: val = buf + sizeof(__u16) + val_size * 2; break; default: return -EINVAL; } *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, val_size)); return 0; } static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { validx += cval->idx_off; return (cval->head.mixer->protocol == UAC_VERSION_1) ? 
get_ctl_value_v1(cval, request, validx, value_ret) : get_ctl_value_v2(cval, request, validx, value_ret); } static int get_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int *value) { return get_ctl_value(cval, UAC_GET_CUR, validx, value); } /* channel = 0: master, 1 = first channel */ static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval, int channel, int *value) { return get_ctl_value(cval, UAC_GET_CUR, (cval->control << 8) | channel, value); } int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int *value) { int err; if (cval->cached & BIT(channel)) { *value = cval->cache_val[index]; return 0; } err = get_cur_mix_raw(cval, channel, value); if (err < 0) { if (!cval->head.mixer->ignore_ctl_error) usb_audio_dbg(cval->head.mixer->chip, "cannot get current value for control %d ch %d: err = %d\n", cval->control, channel, err); return err; } cval->cached |= BIT(channel); cval->cache_val[index] = *value; return 0; } /* * set a mixer value */ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set) { struct snd_usb_audio *chip = cval->head.mixer->chip; unsigned char buf[4]; int idx = 0, val_len, err, timeout = 10; validx += cval->idx_off; if (cval->head.mixer->protocol == UAC_VERSION_1) { val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; } else { /* UAC_VERSION_2/3 */ val_len = uac2_ctl_value_size(cval->val_type); /* FIXME */ if (request != UAC_SET_CUR) { usb_audio_dbg(chip, "RANGE setting not yet supported\n"); return -EINVAL; } request = UAC2_CS_CUR; } value_set = convert_bytes_value(cval, value_set); buf[0] = value_set & 0xff; buf[1] = (value_set >> 8) & 0xff; buf[2] = (value_set >> 16) & 0xff; buf[3] = (value_set >> 24) & 0xff; err = snd_usb_lock_shutdown(chip); if (err < 0) return -EIO; while (timeout-- > 0) { idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); err = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, validx, idx, buf, val_len); if (err >= 0) { err = 0; goto out; } else if (err == -ETIMEDOUT) { goto out; } } usb_audio_dbg(chip, "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n", request, validx, idx, cval->val_type, buf[0], buf[1]); err = -EINVAL; out: snd_usb_unlock_shutdown(chip); return err; } static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value) { return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value); } int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int value) { int err; unsigned int read_only = (channel == 0) ? 
cval->master_readonly : cval->ch_readonly & BIT(channel - 1); if (read_only) { usb_audio_dbg(cval->head.mixer->chip, "%s(): channel %d of control %d is read_only\n", __func__, channel, cval->control); return 0; } err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, (cval->control << 8) | channel, value); if (err < 0) return err; cval->cached |= BIT(channel); cval->cache_val[index] = value; return 0; } /* * TLV callback for mixer volume controls */ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *_tlv) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); DECLARE_TLV_DB_MINMAX(scale, 0, 0); if (size < sizeof(scale)) return -ENOMEM; if (cval->min_mute) scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE; scale[2] = cval->dBmin; scale[3] = cval->dBmax; if (copy_to_user(_tlv, scale, sizeof(scale))) return -EFAULT; return 0; } /* * parser routines begin here... */ static int parse_audio_unit(struct mixer_build *state, int unitid); /* * check if the input/output channel routing is enabled on the given bitmap. * used for mixer unit parser */ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_outs) { int idx = ich * num_outs + och; return bmap[idx >> 3] & (0x80 >> (idx & 7)); } /* * add an alsa control element * search and increment the index until an empty slot is found. * * if failed, give up and free the control instance. */ int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, struct snd_kcontrol *kctl, bool is_std_info) { struct usb_mixer_interface *mixer = list->mixer; int err; while (snd_ctl_find_id(mixer->chip->card, &kctl->id)) kctl->id.index++; err = snd_ctl_add(mixer->chip->card, kctl); if (err < 0) { usb_audio_dbg(mixer->chip, "cannot add control (err = %d)\n", err); return err; } list->kctl = kctl; list->is_std_info = is_std_info; list->next_id_elem = mixer->id_elems[list->id]; mixer->id_elems[list->id] = list; return 0; } /* * get a terminal name string */ static struct iterm_name_combo { int type; char *name; } iterm_names[] = { { 0x0300, "Output" }, { 0x0301, "Speaker" }, { 0x0302, "Headphone" }, { 0x0303, "HMD Audio" }, { 0x0304, "Desktop Speaker" }, { 0x0305, "Room Speaker" }, { 0x0306, "Com Speaker" }, { 0x0307, "LFE" }, { 0x0600, "External In" }, { 0x0601, "Analog In" }, { 0x0602, "Digital In" }, { 0x0603, "Line" }, { 0x0604, "Legacy In" }, { 0x0605, "IEC958 In" }, { 0x0606, "1394 DA Stream" }, { 0x0607, "1394 DV Stream" }, { 0x0700, "Embedded" }, { 0x0701, "Noise Source" }, { 0x0702, "Equalization Noise" }, { 0x0703, "CD" }, { 0x0704, "DAT" }, { 0x0705, "DCC" }, { 0x0706, "MiniDisk" }, { 0x0707, "Analog Tape" }, { 0x0708, "Phonograph" }, { 0x0709, "VCR Audio" }, { 0x070a, "Video Disk Audio" }, { 0x070b, "DVD Audio" }, { 0x070c, "TV Tuner Audio" }, { 0x070d, "Satellite Rec Audio" }, { 0x070e, "Cable Tuner Audio" }, { 0x070f, "DSS Audio" }, { 0x0710, "Radio Receiver" }, { 0x0711, "Radio Transmitter" }, { 0x0712, "Multi-Track Recorder" }, { 0x0713, "Synthesizer" }, { 0 }, }; static int get_term_name(struct snd_usb_audio *chip, struct usb_audio_term *iterm, unsigned char *name, int maxlen, int term_only) { struct iterm_name_combo *names; int len; if (iterm->name) { len = snd_usb_copy_string_desc(chip, iterm->name, name, maxlen); if (len) return len; } /* virtual type - not a real terminal */ if (iterm->type >> 16) { if (term_only) return 0; switch (iterm->type >> 16) { case UAC3_SELECTOR_UNIT: strcpy(name, "Selector"); return 8; case UAC3_PROCESSING_UNIT: strcpy(name, "Process Unit"); 
return 12; case UAC3_EXTENSION_UNIT: strcpy(name, "Ext Unit"); return 8; case UAC3_MIXER_UNIT: strcpy(name, "Mixer"); return 5; default: return sprintf(name, "Unit %d", iterm->id); } } switch (iterm->type & 0xff00) { case 0x0100: strcpy(name, "PCM"); return 3; case 0x0200: strcpy(name, "Mic"); return 3; case 0x0400: strcpy(name, "Headset"); return 7; case 0x0500: strcpy(name, "Phone"); return 5; } for (names = iterm_names; names->type; names++) { if (names->type == iterm->type) { strcpy(name, names->name); return strlen(names->name); } } return 0; } /* * Get logical cluster information for UAC3 devices. */ static int get_cluster_channels_v3(struct mixer_build *state, unsigned int cluster_id) { struct uac3_cluster_header_descriptor c_header; int err; err = snd_usb_ctl_msg(state->chip->dev, usb_rcvctrlpipe(state->chip->dev, 0), UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, cluster_id, snd_usb_ctrl_intf(state->mixer->hostif), &c_header, sizeof(c_header)); if (err < 0) goto error; if (err != sizeof(c_header)) { err = -EIO; goto error; } return c_header.bNrChannels; error: usb_audio_err(state->chip, "cannot request logical cluster ID: %d (err: %d)\n", cluster_id, err); return err; } /* * Get number of channels for a Mixer Unit. */ static int uac_mixer_unit_get_channels(struct mixer_build *state, struct uac_mixer_unit_descriptor *desc) { int mu_channels; switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1) return 0; /* no bmControls -> skip */ mu_channels = uac_mixer_unit_bNrChannels(desc); break; case UAC_VERSION_3: mu_channels = get_cluster_channels_v3(state, uac3_mixer_unit_wClusterDescrID(desc)); break; } return mu_channels; } /* * Parse Input Terminal Unit */ static int __check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term); static int parse_term_uac1_iterm_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac_input_terminal_descriptor *d = p1; term->type = le16_to_cpu(d->wTerminalType); term->channels = d->bNrChannels; term->chconfig = le16_to_cpu(d->wChannelConfig); term->name = d->iTerminal; return 0; } static int parse_term_uac2_iterm_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac2_input_terminal_descriptor *d = p1; int err; /* call recursively to verify the referenced clock entity */ err = __check_input_term(state, d->bCSourceID, term); if (err < 0) return err; /* save input term properties after recursion, * to ensure they are not overriden by the recursion calls */ term->id = id; term->type = le16_to_cpu(d->wTerminalType); term->channels = d->bNrChannels; term->chconfig = le32_to_cpu(d->bmChannelConfig); term->name = d->iTerminal; return 0; } static int parse_term_uac3_iterm_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac3_input_terminal_descriptor *d = p1; int err; /* call recursively to verify the referenced clock entity */ err = __check_input_term(state, d->bCSourceID, term); if (err < 0) return err; /* save input term properties after recursion, * to ensure they are not overriden by the recursion calls */ term->id = id; term->type = le16_to_cpu(d->wTerminalType); err = get_cluster_channels_v3(state, le16_to_cpu(d->wClusterDescrID)); if (err < 0) return err; term->channels = err; /* REVISIT: UAC3 IT doesn't have channels cfg */ term->chconfig = 0; term->name = le16_to_cpu(d->wTerminalDescrStr); 
return 0; } static int parse_term_mixer_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac_mixer_unit_descriptor *d = p1; int protocol = state->mixer->protocol; int err; err = uac_mixer_unit_get_channels(state, d); if (err <= 0) return err; term->type = UAC3_MIXER_UNIT << 16; /* virtual type */ term->channels = err; if (protocol != UAC_VERSION_3) { term->chconfig = uac_mixer_unit_wChannelConfig(d, protocol); term->name = uac_mixer_unit_iMixer(d); } return 0; } static int parse_term_selector_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac_selector_unit_descriptor *d = p1; int err; /* call recursively to retrieve the channel info */ err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ term->id = id; if (state->mixer->protocol != UAC_VERSION_3) term->name = uac_selector_unit_iSelector(d); return 0; } static int parse_term_proc_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id, int vtype) { struct uac_processing_unit_descriptor *d = p1; int protocol = state->mixer->protocol; int err; if (d->bNrInPins) { /* call recursively to retrieve the channel info */ err = __check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; } term->type = vtype << 16; /* virtual type */ term->id = id; if (protocol == UAC_VERSION_3) return 0; if (!term->channels) { term->channels = uac_processing_unit_bNrChannels(d); term->chconfig = uac_processing_unit_wChannelConfig(d, protocol); } term->name = uac_processing_unit_iProcessing(d, protocol); return 0; } static int parse_term_effect_unit(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac2_effect_unit_descriptor *d = p1; int err; err = __check_input_term(state, d->bSourceID, term); if (err < 0) return err; term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */ term->id = id; return 0; } static int parse_term_uac2_clock_source(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac_clock_source_descriptor *d = p1; term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */ term->id = id; term->name = d->iClockSource; return 0; } static int parse_term_uac3_clock_source(struct mixer_build *state, struct usb_audio_term *term, void *p1, int id) { struct uac3_clock_source_descriptor *d = p1; term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */ term->id = id; term->name = le16_to_cpu(d->wClockSourceStr); return 0; } #define PTYPE(a, b) ((a) << 8 | (b)) /* * parse the source unit recursively until it reaches to a terminal * or a branched unit. */ static int __check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term) { int protocol = state->mixer->protocol; void *p1; unsigned char *hdr; for (;;) { /* a loop in the terminal chain? 
*/ if (test_and_set_bit(id, state->termbitmap)) return -EINVAL; p1 = find_audio_control_unit(state, id); if (!p1) break; if (!snd_usb_validate_audio_desc(p1, protocol)) break; /* bad descriptor */ hdr = p1; term->id = id; switch (PTYPE(protocol, hdr[2])) { case PTYPE(UAC_VERSION_1, UAC_FEATURE_UNIT): case PTYPE(UAC_VERSION_2, UAC_FEATURE_UNIT): case PTYPE(UAC_VERSION_3, UAC3_FEATURE_UNIT): { /* the header is the same for all versions */ struct uac_feature_unit_descriptor *d = p1; id = d->bSourceID; break; /* continue to parse */ } case PTYPE(UAC_VERSION_1, UAC_INPUT_TERMINAL): return parse_term_uac1_iterm_unit(state, term, p1, id); case PTYPE(UAC_VERSION_2, UAC_INPUT_TERMINAL): return parse_term_uac2_iterm_unit(state, term, p1, id); case PTYPE(UAC_VERSION_3, UAC_INPUT_TERMINAL): return parse_term_uac3_iterm_unit(state, term, p1, id); case PTYPE(UAC_VERSION_1, UAC_MIXER_UNIT): case PTYPE(UAC_VERSION_2, UAC_MIXER_UNIT): case PTYPE(UAC_VERSION_3, UAC3_MIXER_UNIT): return parse_term_mixer_unit(state, term, p1, id); case PTYPE(UAC_VERSION_1, UAC_SELECTOR_UNIT): case PTYPE(UAC_VERSION_2, UAC_SELECTOR_UNIT): case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SELECTOR): case PTYPE(UAC_VERSION_3, UAC3_SELECTOR_UNIT): case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SELECTOR): return parse_term_selector_unit(state, term, p1, id); case PTYPE(UAC_VERSION_1, UAC1_PROCESSING_UNIT): case PTYPE(UAC_VERSION_2, UAC2_PROCESSING_UNIT_V2): case PTYPE(UAC_VERSION_3, UAC3_PROCESSING_UNIT): return parse_term_proc_unit(state, term, p1, id, UAC3_PROCESSING_UNIT); case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT): case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT): return parse_term_effect_unit(state, term, p1, id); case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT): case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2): case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT): return parse_term_proc_unit(state, term, p1, id, UAC3_EXTENSION_UNIT); case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SOURCE): return parse_term_uac2_clock_source(state, term, p1, id); case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SOURCE): return parse_term_uac3_clock_source(state, term, p1, id); default: return -ENODEV; } } return -ENODEV; } static int check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term) { memset(term, 0, sizeof(*term)); memset(state->termbitmap, 0, sizeof(state->termbitmap)); return __check_input_term(state, id, term); } /* * Feature Unit */ /* feature unit control information */ struct usb_feature_control_info { int control; const char *name; int type; /* data type for uac1 */ int type_uac2; /* data type for uac2 if different from uac1, else -1 */ }; static const struct usb_feature_control_info audio_feature_info[] = { { UAC_FU_MUTE, "Mute", USB_MIXER_INV_BOOLEAN, -1 }, { UAC_FU_VOLUME, "Volume", USB_MIXER_S16, -1 }, { UAC_FU_BASS, "Tone Control - Bass", USB_MIXER_S8, -1 }, { UAC_FU_MID, "Tone Control - Mid", USB_MIXER_S8, -1 }, { UAC_FU_TREBLE, "Tone Control - Treble", USB_MIXER_S8, -1 }, { UAC_FU_GRAPHIC_EQUALIZER, "Graphic Equalizer", USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */ { UAC_FU_AUTOMATIC_GAIN, "Auto Gain Control", USB_MIXER_BOOLEAN, -1 }, { UAC_FU_DELAY, "Delay Control", USB_MIXER_U16, USB_MIXER_U32 }, { UAC_FU_BASS_BOOST, "Bass Boost", USB_MIXER_BOOLEAN, -1 }, { UAC_FU_LOUDNESS, "Loudness", USB_MIXER_BOOLEAN, -1 }, /* UAC2 specific */ { UAC2_FU_INPUT_GAIN, "Input Gain Control", USB_MIXER_S16, -1 }, { UAC2_FU_INPUT_GAIN_PAD, "Input Gain Pad Control", USB_MIXER_S16, -1 }, { UAC2_FU_PHASE_INVERTER, "Phase Inverter Control", USB_MIXER_BOOLEAN, -1 }, }; 
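/*
 * The table above maps UAC feature unit control selectors to ALSA
 * control names and mixer value types; get_feature_control_info()
 * later in this file looks up an entry by its control selector, and
 * a type_uac2 of -1 means the UAC2 data type is the same as the
 * UAC1 one.
 */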
static void usb_mixer_elem_info_free(struct usb_mixer_elem_info *cval) { kfree(cval); } /* private_free callback */ void snd_usb_mixer_elem_free(struct snd_kcontrol *kctl) { usb_mixer_elem_info_free(kctl->private_data); kctl->private_data = NULL; } /* * interface to ALSA control for feature/mixer units */ /* volume control quirks */ static void volume_control_quirks(struct usb_mixer_elem_info *cval, struct snd_kcontrol *kctl) { struct snd_usb_audio *chip = cval->head.mixer->chip; if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_384) { if (!strcmp(kctl->id.name, "Mic Capture Volume")) { usb_audio_info(chip, "set resolution quirk: cval->res = 384\n"); cval->res = 384; } } else if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_16) { if (!strcmp(kctl->id.name, "Mic Capture Volume")) { usb_audio_info(chip, "set resolution quirk: cval->res = 16\n"); cval->res = 16; } } switch (chip->usb_id) { case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ if (strcmp(kctl->id.name, "Effect Duration") == 0) { cval->min = 0x0000; cval->max = 0xffff; cval->res = 0x00e6; break; } if (strcmp(kctl->id.name, "Effect Volume") == 0 || strcmp(kctl->id.name, "Effect Feedback Volume") == 0) { cval->min = 0x00; cval->max = 0xff; break; } if (strstr(kctl->id.name, "Effect Return") != NULL) { cval->min = 0xb706; cval->max = 0xff7b; cval->res = 0x0073; break; } if ((strstr(kctl->id.name, "Playback Volume") != NULL) || (strstr(kctl->id.name, "Effect Send") != NULL)) { cval->min = 0xb5fb; /* -73 dB = 0xb6ff */ cval->max = 0xfcfe; cval->res = 0x0073; } break; case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */ case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */ if (strcmp(kctl->id.name, "Effect Duration") == 0) { usb_audio_info(chip, "set quirk for FTU Effect Duration\n"); cval->min = 0x0000; cval->max = 0x7f00; cval->res = 0x0100; break; } if (strcmp(kctl->id.name, "Effect Volume") == 0 || strcmp(kctl->id.name, "Effect Feedback Volume") == 0) { usb_audio_info(chip, "set quirks for FTU Effect Feedback/Volume\n"); cval->min = 0x00; cval->max = 0x7f; break; } break; case USB_ID(0x0d8c, 0x0103): if (!strcmp(kctl->id.name, "PCM Playback Volume")) { usb_audio_info(chip, "set volume quirk for CM102-A+/102S+\n"); cval->min = -256; } break; case USB_ID(0x0471, 0x0101): case USB_ID(0x0471, 0x0104): case USB_ID(0x0471, 0x0105): case USB_ID(0x0672, 0x1041): /* quirk for UDA1321/N101. * note that detection between firmware 2.1.1.7 (N101) * and later 2.1.1.21 is not very clear from datasheets. 
* I hope that the min value is -15360 for newer firmware --jk */ if (!strcmp(kctl->id.name, "PCM Playback Volume") && cval->min == -15616) { usb_audio_info(chip, "set volume quirk for UDA1321/N101 chip\n"); cval->max = -256; } break; case USB_ID(0x046d, 0x09a4): if (!strcmp(kctl->id.name, "Mic Capture Volume")) { usb_audio_info(chip, "set volume quirk for QuickCam E3500\n"); cval->min = 6080; cval->max = 8768; cval->res = 192; } break; case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */ if ((strstr(kctl->id.name, "Playback Volume") != NULL) || strstr(kctl->id.name, "Capture Volume") != NULL) { cval->min >>= 8; cval->max = 0; cval->res = 1; } break; } } /* forcibly initialize the current mixer value; if GET_CUR fails, set to * the minimum as default */ static void init_cur_mix_raw(struct usb_mixer_elem_info *cval, int ch, int idx) { int val, err; err = snd_usb_get_cur_mix_value(cval, ch, idx, &val); if (!err) return; if (!cval->head.mixer->ignore_ctl_error) usb_audio_warn(cval->head.mixer->chip, "%d:%d: failed to get current value for ch %d (%d)\n", cval->head.id, mixer_ctrl_intf(cval->head.mixer), ch, err); snd_usb_set_cur_mix_value(cval, ch, idx, cval->min); } /* * retrieve the minimum and maximum values for the specified control */ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval, int default_min, struct snd_kcontrol *kctl) { int i, idx; /* for failsafe */ cval->min = default_min; cval->max = cval->min + 1; cval->res = 1; cval->dBmin = cval->dBmax = 0; if (cval->val_type == USB_MIXER_BOOLEAN || cval->val_type == USB_MIXER_INV_BOOLEAN) { cval->initialized = 1; } else { int minchn = 0; if (cval->cmask) { for (i = 0; i < MAX_CHANNELS; i++) if (cval->cmask & BIT(i)) { minchn = i + 1; break; } } if (get_ctl_value(cval, UAC_GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 || get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) { usb_audio_err(cval->head.mixer->chip, "%d:%d: cannot get min/max values for control %d (id %d)\n", cval->head.id, mixer_ctrl_intf(cval->head.mixer), cval->control, cval->head.id); return -EINVAL; } if (get_ctl_value(cval, UAC_GET_RES, (cval->control << 8) | minchn, &cval->res) < 0) { cval->res = 1; } else if (cval->head.mixer->protocol == UAC_VERSION_1) { int last_valid_res = cval->res; while (cval->res > 1) { if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES, (cval->control << 8) | minchn, cval->res / 2) < 0) break; cval->res /= 2; } if (get_ctl_value(cval, UAC_GET_RES, (cval->control << 8) | minchn, &cval->res) < 0) cval->res = last_valid_res; } if (cval->res == 0) cval->res = 1; /* Additional checks for the proper resolution * * Some devices report smaller resolutions than actually * reacting. They don't return errors but simply clip * to the lower aligned value. 
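* The probe below saves the current value, nudges it by cval->res,
* reads it back, and keeps doubling the step until the device
* actually reflects the change; the saved value is restored at the end.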
*/ if (cval->min + cval->res < cval->max) { int last_valid_res = cval->res; int saved, test, check; if (get_cur_mix_raw(cval, minchn, &saved) < 0) goto no_res_check; for (;;) { test = saved; if (test < cval->max) test += cval->res; else test -= cval->res; if (test < cval->min || test > cval->max || snd_usb_set_cur_mix_value(cval, minchn, 0, test) || get_cur_mix_raw(cval, minchn, &check)) { cval->res = last_valid_res; break; } if (test == check) break; cval->res *= 2; } snd_usb_set_cur_mix_value(cval, minchn, 0, saved); } no_res_check: cval->initialized = 1; } if (kctl) volume_control_quirks(cval, kctl); /* USB descriptions contain the dB scale in 1/256 dB unit * while ALSA TLV contains in 1/100 dB unit */ cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256; cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256; if (cval->dBmin > cval->dBmax) { /* something is wrong; assume it's either from/to 0dB */ if (cval->dBmin < 0) cval->dBmax = 0; else if (cval->dBmin > 0) cval->dBmin = 0; if (cval->dBmin > cval->dBmax) { /* totally crap, return an error */ return -EINVAL; } } else { /* if the max volume is too low, it's likely a bogus range; * here we use -96dB as the threshold */ if (cval->dBmax <= -9600) { usb_audio_info(cval->head.mixer->chip, "%d:%d: bogus dB values (%d/%d), disabling dB reporting\n", cval->head.id, mixer_ctrl_intf(cval->head.mixer), cval->dBmin, cval->dBmax); cval->dBmin = cval->dBmax = 0; } } /* initialize all elements */ if (!cval->cmask) { init_cur_mix_raw(cval, 0, 0); } else { idx = 0; for (i = 0; i < MAX_CHANNELS; i++) { if (cval->cmask & BIT(i)) { init_cur_mix_raw(cval, i + 1, idx); idx++; } } } return 0; } #define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL) /* get the max value advertised via control API */ static int get_max_exposed(struct usb_mixer_elem_info *cval) { if (!cval->max_exposed) { if (cval->res) cval->max_exposed = DIV_ROUND_UP(cval->max - cval->min, cval->res); else cval->max_exposed = cval->max - cval->min; } return cval->max_exposed; } /* get a feature/mixer unit info */ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); if (cval->val_type == USB_MIXER_BOOLEAN || cval->val_type == USB_MIXER_INV_BOOLEAN) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = cval->channels; if (cval->val_type != USB_MIXER_BOOLEAN && cval->val_type != USB_MIXER_INV_BOOLEAN) { if (!cval->initialized) { get_min_max_with_quirks(cval, 0, kcontrol); if (cval->initialized && cval->dBmin >= cval->dBmax) { kcontrol->vd[0].access &= ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK); snd_ctl_notify(cval->head.mixer->chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kcontrol->id); } } } uinfo->value.integer.min = 0; uinfo->value.integer.max = get_max_exposed(cval); return 0; } /* get the current value from feature/mixer unit */ static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int c, cnt, val, err; ucontrol->value.integer.value[0] = cval->min; if (cval->cmask) { cnt = 0; for (c = 0; c < MAX_CHANNELS; c++) { if (!(cval->cmask & BIT(c))) continue; err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &val); if (err < 0) return filter_error(cval, err); val = get_relative_value(cval, val); ucontrol->value.integer.value[cnt] = val; cnt++; } return 0; } else { 
/* master channel */ err = snd_usb_get_cur_mix_value(cval, 0, 0, &val); if (err < 0) return filter_error(cval, err); val = get_relative_value(cval, val); ucontrol->value.integer.value[0] = val; } return 0; } /* put the current value to feature/mixer unit */ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int max_val = get_max_exposed(cval); int c, cnt, val, oval, err; int changed = 0; if (cval->cmask) { cnt = 0; for (c = 0; c < MAX_CHANNELS; c++) { if (!(cval->cmask & BIT(c))) continue; err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &oval); if (err < 0) return filter_error(cval, err); val = ucontrol->value.integer.value[cnt]; if (val < 0 || val > max_val) return -EINVAL; val = get_abs_value(cval, val); if (oval != val) { snd_usb_set_cur_mix_value(cval, c + 1, cnt, val); changed = 1; } cnt++; } } else { /* master channel */ err = snd_usb_get_cur_mix_value(cval, 0, 0, &oval); if (err < 0) return filter_error(cval, err); val = ucontrol->value.integer.value[0]; if (val < 0 || val > max_val) return -EINVAL; val = get_abs_value(cval, val); if (val != oval) { snd_usb_set_cur_mix_value(cval, 0, 0, val); changed = 1; } } return changed; } /* get the boolean value from the master channel of a UAC control */ static int mixer_ctl_master_bool_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int val, err; err = snd_usb_get_cur_mix_value(cval, 0, 0, &val); if (err < 0) return filter_error(cval, err); val = (val != 0); ucontrol->value.integer.value[0] = val; return 0; } static int get_connector_value(struct usb_mixer_elem_info *cval, char *name, int *val) { struct snd_usb_audio *chip = cval->head.mixer->chip; int idx = 0, validx, ret; validx = cval->control << 8 | 0; ret = snd_usb_lock_shutdown(chip) ? 
-EIO : 0; if (ret) goto error; idx = mixer_ctrl_intf(cval->head.mixer) | (cval->head.id << 8); if (cval->head.mixer->protocol == UAC_VERSION_2) { struct uac2_connectors_ctl_blk uac2_conn; ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), UAC2_CS_CUR, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, &uac2_conn, sizeof(uac2_conn)); if (val) *val = !!uac2_conn.bNrChannels; } else { /* UAC_VERSION_3 */ struct uac3_insertion_ctl_blk uac3_conn; ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), UAC2_CS_CUR, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, &uac3_conn, sizeof(uac3_conn)); if (val) *val = !!uac3_conn.bmConInserted; } snd_usb_unlock_shutdown(chip); if (ret < 0) { if (name && strstr(name, "Speaker")) { if (val) *val = 1; return 0; } error: usb_audio_err(chip, "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", UAC_GET_CUR, validx, idx, cval->val_type); if (val) *val = 0; return filter_error(cval, ret); } return ret; } /* get the connectors status and report it as boolean type */ static int mixer_ctl_connector_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int ret, val; ret = get_connector_value(cval, kcontrol->id.name, &val); if (ret < 0) return ret; ucontrol->value.integer.value[0] = val; return 0; } static const struct snd_kcontrol_new usb_feature_unit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later manually */ .info = mixer_ctl_feature_info, .get = mixer_ctl_feature_get, .put = mixer_ctl_feature_put, }; /* the read-only variant */ static const struct snd_kcontrol_new usb_feature_unit_ctl_ro = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later manually */ .info = mixer_ctl_feature_info, .get = mixer_ctl_feature_get, .put = NULL, }; /* * A control which shows the boolean value from reading a UAC control on * the master channel. */ static const struct snd_kcontrol_new usb_bool_master_control_ctl_ro = { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "", /* will be filled later manually */ .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_ctl_boolean_mono_info, .get = mixer_ctl_master_bool_get, .put = NULL, }; static const struct snd_kcontrol_new usb_connector_ctl_ro = { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "", /* will be filled later manually */ .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_ctl_boolean_mono_info, .get = mixer_ctl_connector_get, .put = NULL, }; /* * This symbol is exported in order to allow the mixer quirks to * hook up to the standard feature unit control mechanism */ const struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl; /* * build a feature control */ static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str) { return strlcat(kctl->id.name, str, sizeof(kctl->id.name)); } /* * A lot of headsets/headphones have a "Speaker" mixer. Make sure we * rename it to "Headphone". We determine if something is a headphone * similar to how udev determines form factor. 
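* (in practice: if the card's shortname contains "Headset" or
* "Headphone", a control named "Speaker" is renamed to "Headphone")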
*/ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl, struct snd_card *card) { static const char * const names_to_check[] = { "Headset", "headset", "Headphone", "headphone", NULL}; const char * const *s; bool found = false; if (strcmp("Speaker", kctl->id.name)) return; for (s = names_to_check; *s; s++) if (strstr(card->shortname, *s)) { found = true; break; } if (!found) return; snd_ctl_rename(card, kctl, "Headphone"); } static const struct usb_feature_control_info *get_feature_control_info(int control) { int i; for (i = 0; i < ARRAY_SIZE(audio_feature_info); ++i) { if (audio_feature_info[i].control == control) return &audio_feature_info[i]; } return NULL; } static void __build_feature_ctl(struct usb_mixer_interface *mixer, const struct usbmix_name_map *imap, unsigned int ctl_mask, int control, struct usb_audio_term *iterm, struct usb_audio_term *oterm, int unitid, int nameid, int readonly_mask) { const struct usb_feature_control_info *ctl_info; unsigned int len = 0; int mapped_name = 0; struct snd_kcontrol *kctl; struct usb_mixer_elem_info *cval; const struct usbmix_name_map *map; unsigned int range; if (control == UAC_FU_GRAPHIC_EQUALIZER) { /* FIXME: not supported yet */ return; } map = find_map(imap, unitid, control); if (check_ignored_ctl(map)) return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return; snd_usb_mixer_elem_init_std(&cval->head, mixer, unitid); cval->control = control; cval->cmask = ctl_mask; ctl_info = get_feature_control_info(control); if (!ctl_info) { usb_mixer_elem_info_free(cval); return; } if (mixer->protocol == UAC_VERSION_1) cval->val_type = ctl_info->type; else /* UAC_VERSION_2 */ cval->val_type = ctl_info->type_uac2 >= 0 ? ctl_info->type_uac2 : ctl_info->type; if (ctl_mask == 0) { cval->channels = 1; /* master channel */ cval->master_readonly = readonly_mask; } else { int i, c = 0; for (i = 0; i < 16; i++) if (ctl_mask & BIT(i)) c++; cval->channels = c; cval->ch_readonly = readonly_mask; } /* * If all channels in the mask are marked read-only, make the control * read-only. snd_usb_set_cur_mix_value() will check the mask again and won't * issue write commands to read-only channels. */ if (cval->channels == readonly_mask) kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval); else kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval); if (!kctl) { usb_audio_err(mixer->chip, "cannot malloc kcontrol\n"); usb_mixer_elem_info_free(cval); return; } kctl->private_free = snd_usb_mixer_elem_free; len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); mapped_name = len != 0; if (!len && nameid) len = snd_usb_copy_string_desc(mixer->chip, nameid, kctl->id.name, sizeof(kctl->id.name)); switch (control) { case UAC_FU_MUTE: case UAC_FU_VOLUME: /* * determine the control name. the rule is: * - if a name id is given in descriptor, use it. * - if the connected input can be determined, then use the name * of terminal type. * - if the connected output can be determined, use it. * - otherwise, anonymous name. */ if (!len) { if (iterm) len = get_term_name(mixer->chip, iterm, kctl->id.name, sizeof(kctl->id.name), 1); if (!len && oterm) len = get_term_name(mixer->chip, oterm, kctl->id.name, sizeof(kctl->id.name), 1); if (!len) snprintf(kctl->id.name, sizeof(kctl->id.name), "Feature %d", unitid); } if (!mapped_name) check_no_speaker_on_headset(kctl, mixer->chip->card); /* * determine the stream direction: * if the connected output is USB stream, then it's likely a * capture stream. 
otherwise it should be playback (hopefully :) */ if (!mapped_name && oterm && !(oterm->type >> 16)) { if ((oterm->type & 0xff00) == 0x0100) append_ctl_name(kctl, " Capture"); else append_ctl_name(kctl, " Playback"); } append_ctl_name(kctl, control == UAC_FU_MUTE ? " Switch" : " Volume"); break; default: if (!len) strscpy(kctl->id.name, audio_feature_info[control-1].name, sizeof(kctl->id.name)); break; } /* get min/max values */ get_min_max_with_quirks(cval, 0, kctl); /* skip a bogus volume range */ if (cval->max <= cval->min) { usb_audio_dbg(mixer->chip, "[%d] FU [%s] skipped due to invalid volume\n", cval->head.id, kctl->id.name); snd_ctl_free_one(kctl); return; } if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); if (cval->dBmin < cval->dBmax || !cval->initialized) { kctl->tlv.c = snd_usb_mixer_vol_tlv; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } } snd_usb_mixer_fu_apply_quirk(mixer, cval, unitid, kctl); range = (cval->max - cval->min) / cval->res; /* * Are there devices with volume range more than 255? I use a bit more * to be sure. 384 is a resolution magic number found on Logitech * devices. It will definitively catch all buggy Logitech devices. */ if (range > 384) { usb_audio_warn(mixer->chip, "Warning! Unlikely big volume range (=%u), cval->res is probably wrong.", range); usb_audio_warn(mixer->chip, "[%d] FU [%s] ch = %d, val = %d/%d/%d", cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); } usb_audio_dbg(mixer->chip, "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); snd_usb_mixer_add_control(&cval->head, kctl); } static void build_feature_ctl(struct mixer_build *state, void *raw_desc, unsigned int ctl_mask, int control, struct usb_audio_term *iterm, int unitid, int readonly_mask) { struct uac_feature_unit_descriptor *desc = raw_desc; int nameid = uac_feature_unit_iFeature(desc); __build_feature_ctl(state->mixer, state->map, ctl_mask, control, iterm, &state->oterm, unitid, nameid, readonly_mask); } static void build_feature_ctl_badd(struct usb_mixer_interface *mixer, unsigned int ctl_mask, int control, int unitid, const struct usbmix_name_map *badd_map) { __build_feature_ctl(mixer, badd_map, ctl_mask, control, NULL, NULL, unitid, 0, 0); } static void get_connector_control_name(struct usb_mixer_interface *mixer, struct usb_audio_term *term, bool is_input, char *name, int name_size) { int name_len = get_term_name(mixer->chip, term, name, name_size, 0); if (name_len == 0) strscpy(name, "Unknown", name_size); /* * sound/core/ctljack.c has a convention of naming jack controls * by ending in " Jack". Make it slightly more useful by * indicating Input or Output after the terminal name. 
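* e.g. a microphone input terminal ends up as "Mic - Input Jack".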
*/ if (is_input) strlcat(name, " - Input Jack", name_size); else strlcat(name, " - Output Jack", name_size); } /* get connector value to "wake up" the USB audio */ static int connector_mixer_resume(struct usb_mixer_elem_list *list) { struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); get_connector_value(cval, NULL, NULL); return 0; } /* Build a mixer control for a UAC connector control (jack-detect) */ static void build_connector_control(struct usb_mixer_interface *mixer, const struct usbmix_name_map *imap, struct usb_audio_term *term, bool is_input) { struct snd_kcontrol *kctl; struct usb_mixer_elem_info *cval; const struct usbmix_name_map *map; map = find_map(imap, term->id, 0); if (check_ignored_ctl(map)) return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return; snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id); /* set up a specific resume callback */ cval->head.resume = connector_mixer_resume; /* * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the * number of channels connected. * * UAC3: The first byte specifies size of bitmap for the inserted controls. The * following byte(s) specifies which connectors are inserted. * * This boolean ctl will simply report if any channels are connected * or not. */ if (mixer->protocol == UAC_VERSION_2) cval->control = UAC2_TE_CONNECTOR; else /* UAC_VERSION_3 */ cval->control = UAC3_TE_INSERTION; cval->val_type = USB_MIXER_BOOLEAN; cval->channels = 1; /* report true if any channel is connected */ cval->min = 0; cval->max = 1; kctl = snd_ctl_new1(&usb_connector_ctl_ro, cval); if (!kctl) { usb_audio_err(mixer->chip, "cannot malloc kcontrol\n"); usb_mixer_elem_info_free(cval); return; } if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name)); else get_connector_control_name(mixer, term, is_input, kctl->id.name, sizeof(kctl->id.name)); kctl->private_free = snd_usb_mixer_elem_free; snd_usb_mixer_add_control(&cval->head, kctl); } static int parse_clock_source_unit(struct mixer_build *state, int unitid, void *_ftr) { struct uac_clock_source_descriptor *hdr = _ftr; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int ret; if (state->mixer->protocol != UAC_VERSION_2) return -EINVAL; /* * The only property of this unit we are interested in is the * clock source validity. If that isn't readable, just bail out. */ if (!uac_v2v3_control_is_readable(hdr->bmControls, UAC2_CS_CONTROL_CLOCK_VALID)) return 0; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return -ENOMEM; snd_usb_mixer_elem_init_std(&cval->head, state->mixer, hdr->bClockID); cval->min = 0; cval->max = 1; cval->channels = 1; cval->val_type = USB_MIXER_BOOLEAN; cval->control = UAC2_CS_CONTROL_CLOCK_VALID; cval->master_readonly = 1; /* From UAC2 5.2.5.1.2 "Only the get request is supported." */ kctl = snd_ctl_new1(&usb_bool_master_control_ctl_ro, cval); if (!kctl) { usb_mixer_elem_info_free(cval); return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; ret = snd_usb_copy_string_desc(state->chip, hdr->iClockSource, kctl->id.name, sizeof(kctl->id.name)); if (ret > 0) append_ctl_name(kctl, " Validity"); else snprintf(kctl->id.name, sizeof(kctl->id.name), "Clock Source %d Validity", hdr->bClockID); return snd_usb_mixer_add_control(&cval->head, kctl); } /* * parse a feature unit * * most of controls are defined here. 
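* For every control selector we build a per-channel control from the
* logical channels' bmaControls bitmaps and, when the master
* (channel 0) bitmap advertises it, a separate master control.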
*/ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void *_ftr) { int channels, i, j; struct usb_audio_term iterm; unsigned int master_bits; int err, csize; struct uac_feature_unit_descriptor *hdr = _ftr; __u8 *bmaControls; if (state->mixer->protocol == UAC_VERSION_1) { csize = hdr->bControlSize; channels = (hdr->bLength - 7) / csize - 1; bmaControls = hdr->bmaControls; } else if (state->mixer->protocol == UAC_VERSION_2) { struct uac2_feature_unit_descriptor *ftr = _ftr; csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; } else { /* UAC_VERSION_3 */ struct uac3_feature_unit_descriptor *ftr = _ftr; csize = 4; channels = (ftr->bLength - 7) / 4 - 1; bmaControls = ftr->bmaControls; } if (channels > 32) { usb_audio_info(state->chip, "usbmixer: too many channels (%d) in unit %d\n", channels, unitid); return -EINVAL; } /* parse the source unit */ err = parse_audio_unit(state, hdr->bSourceID); if (err < 0) return err; /* determine the input source type and name */ err = check_input_term(state, hdr->bSourceID, &iterm); if (err < 0) return err; master_bits = snd_usb_combine_bytes(bmaControls, csize); /* master configuration quirks */ switch (state->chip->usb_id) { case USB_ID(0x08bb, 0x2702): usb_audio_info(state->chip, "usbmixer: master volume quirk for PCM2702 chip\n"); /* disable non-functional volume control */ master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME); break; case USB_ID(0x1130, 0xf211): usb_audio_info(state->chip, "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n"); /* disable non-functional volume control */ channels = 0; break; } if (state->mixer->protocol == UAC_VERSION_1) { /* check all control types */ for (i = 0; i < 10; i++) { unsigned int ch_bits = 0; int control = audio_feature_info[i].control; for (j = 0; j < channels; j++) { unsigned int mask; mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize); if (mask & BIT(i)) ch_bits |= BIT(j); } /* audio class v1 controls are never read-only */ /* * The first channel must be set * (for ease of programming). */ if (ch_bits & 1) build_feature_ctl(state, _ftr, ch_bits, control, &iterm, unitid, 0); if (master_bits & BIT(i)) build_feature_ctl(state, _ftr, 0, control, &iterm, unitid, 0); } } else { /* UAC_VERSION_2/3 */ for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) { unsigned int ch_bits = 0; unsigned int ch_read_only = 0; int control = audio_feature_info[i].control; for (j = 0; j < channels; j++) { unsigned int mask; mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize); if (uac_v2v3_control_is_readable(mask, control)) { ch_bits |= BIT(j); if (!uac_v2v3_control_is_writeable(mask, control)) ch_read_only |= BIT(j); } } /* * NOTE: build_feature_ctl() will mark the control * read-only if all channels are marked read-only in * the descriptors. Otherwise, the control will be * reported as writeable, but the driver will not * actually issue a write command for read-only * channels. */ /* * The first channel must be set * (for ease of programming). 
*/ if (ch_bits & 1) build_feature_ctl(state, _ftr, ch_bits, control, &iterm, unitid, ch_read_only); if (uac_v2v3_control_is_readable(master_bits, control)) build_feature_ctl(state, _ftr, 0, control, &iterm, unitid, !uac_v2v3_control_is_writeable(master_bits, control)); } } return 0; } /* * Mixer Unit */ /* check whether the given in/out overflows bmMixerControls matrix */ static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc, int protocol, int num_ins, int num_outs) { u8 *hdr = (u8 *)desc; u8 *c = uac_mixer_unit_bmControls(desc, protocol); size_t rest; /* remaining bytes after bmMixerControls */ switch (protocol) { case UAC_VERSION_1: default: rest = 1; /* iMixer */ break; case UAC_VERSION_2: rest = 2; /* bmControls + iMixer */ break; case UAC_VERSION_3: rest = 6; /* bmControls + wMixerDescrStr */ break; } /* overflow? */ return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0]; } /* * build a mixer unit control * * the callbacks are identical with feature unit. * input channel number (zero based) is given in control field instead. */ static void build_mixer_unit_ctl(struct mixer_build *state, struct uac_mixer_unit_descriptor *desc, int in_pin, int in_ch, int num_outs, int unitid, struct usb_audio_term *iterm) { struct usb_mixer_elem_info *cval; unsigned int i, len; struct snd_kcontrol *kctl; const struct usbmix_name_map *map; map = find_map(state->map, unitid, 0); if (check_ignored_ctl(map)) return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return; snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); cval->control = in_ch + 1; /* based on 1 */ cval->val_type = USB_MIXER_S16; for (i = 0; i < num_outs; i++) { __u8 *c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); if (check_matrix_bitmap(c, in_ch, i, num_outs)) { cval->cmask |= BIT(i); cval->channels++; } } /* get min/max values */ get_min_max(cval, 0); kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval); if (!kctl) { usb_audio_err(state->chip, "cannot malloc kcontrol\n"); usb_mixer_elem_info_free(cval); return; } kctl->private_free = snd_usb_mixer_elem_free; len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (!len) len = get_term_name(state->chip, iterm, kctl->id.name, sizeof(kctl->id.name), 0); if (!len) snprintf(kctl->id.name, sizeof(kctl->id.name), "Mixer Source %d", in_ch + 1); append_ctl_name(kctl, " Volume"); usb_audio_dbg(state->chip, "[%d] MU [%s] ch = %d, val = %d/%d\n", cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max); snd_usb_mixer_add_control(&cval->head, kctl); } static int parse_audio_input_terminal(struct mixer_build *state, int unitid, void *raw_desc) { struct usb_audio_term iterm; unsigned int control, bmctls, term_id; if (state->mixer->protocol == UAC_VERSION_2) { struct uac2_input_terminal_descriptor *d_v2 = raw_desc; control = UAC2_TE_CONNECTOR; term_id = d_v2->bTerminalID; bmctls = le16_to_cpu(d_v2->bmControls); } else if (state->mixer->protocol == UAC_VERSION_3) { struct uac3_input_terminal_descriptor *d_v3 = raw_desc; control = UAC3_TE_INSERTION; term_id = d_v3->bTerminalID; bmctls = le32_to_cpu(d_v3->bmControls); } else { return 0; /* UAC1. No Insertion control */ } check_input_term(state, term_id, &iterm); /* Check for jack detection. 
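* A connector (insertion) control is created only for terminals that
* are not USB streaming terminals and whose bmControls mark the
* insertion/connector control as readable.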
*/ if ((iterm.type & 0xff00) != 0x0100 && uac_v2v3_control_is_readable(bmctls, control)) build_connector_control(state->mixer, state->map, &iterm, true); return 0; } /* * parse a mixer unit */ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_mixer_unit_descriptor *desc = raw_desc; struct usb_audio_term iterm; int input_pins, num_ins, num_outs; int pin, ich, err; err = uac_mixer_unit_get_channels(state, desc); if (err < 0) { usb_audio_err(state->chip, "invalid MIXER UNIT descriptor %d\n", unitid); return err; } num_outs = err; input_pins = desc->bNrInPins; num_ins = 0; ich = 0; for (pin = 0; pin < input_pins; pin++) { err = parse_audio_unit(state, desc->baSourceID[pin]); if (err < 0) continue; /* no bmControls field (e.g. Maya44) -> ignore */ if (!num_outs) continue; err = check_input_term(state, desc->baSourceID[pin], &iterm); if (err < 0) return err; num_ins += iterm.channels; if (mixer_bitmap_overflow(desc, state->mixer->protocol, num_ins, num_outs)) break; for (; ich < num_ins; ich++) { int och, ich_has_controls = 0; for (och = 0; och < num_outs; och++) { __u8 *c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); if (check_matrix_bitmap(c, ich, och, num_outs)) { ich_has_controls = 1; break; } } if (ich_has_controls) build_mixer_unit_ctl(state, desc, pin, ich, num_outs, unitid, &iterm); } } return 0; } /* * Processing Unit / Extension Unit */ /* get callback for processing/extension unit */ static int mixer_ctl_procunit_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int err, val; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { ucontrol->value.integer.value[0] = cval->min; return filter_error(cval, err); } val = get_relative_value(cval, val); ucontrol->value.integer.value[0] = val; return 0; } /* put callback for processing/extension unit */ static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int val, oval, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) return filter_error(cval, err); val = ucontrol->value.integer.value[0]; if (val < 0 || val > get_max_exposed(cval)) return -EINVAL; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for processing/extension unit */ static const struct snd_kcontrol_new mixer_procunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_feature_info, .get = mixer_ctl_procunit_get, .put = mixer_ctl_procunit_put, }; /* * predefined data for processing units */ struct procunit_value_info { int control; const char *suffix; int val_type; int min_value; }; struct procunit_info { int type; char *name; const struct procunit_value_info *values; }; static const struct procunit_value_info undefined_proc_info[] = { { 0x00, "Control Undefined", 0 }, { 0 } }; static const struct procunit_value_info updown_proc_info[] = { { UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static const struct procunit_value_info prologic_proc_info[] = { { UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static const struct procunit_value_info threed_enh_proc_info[] = { { UAC_3D_ENABLE, "Switch", 
USB_MIXER_BOOLEAN }, { UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 }, { 0 } }; static const struct procunit_value_info reverb_proc_info[] = { { UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, { UAC_REVERB_TIME, "Time", USB_MIXER_U16 }, { UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 }, { 0 } }; static const struct procunit_value_info chorus_proc_info[] = { { UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, { UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, { UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, { 0 } }; static const struct procunit_value_info dcr_proc_info[] = { { UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DCR_RATE, "Ratio", USB_MIXER_U16 }, { UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 }, { UAC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 }, { UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 }, { UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 }, { 0 } }; static const struct procunit_info procunits[] = { { UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info }, { UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info }, { UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info }, { UAC_PROCESS_REVERB, "Reverb", reverb_proc_info }, { UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info }, { UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info }, { 0 }, }; static const struct procunit_value_info uac3_updown_proc_info[] = { { UAC3_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static const struct procunit_value_info uac3_stereo_ext_proc_info[] = { { UAC3_EXT_WIDTH_CONTROL, "Width Control", USB_MIXER_U8 }, { 0 } }; static const struct procunit_info uac3_procunits[] = { { UAC3_PROCESS_UP_DOWNMIX, "Up Down", uac3_updown_proc_info }, { UAC3_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", uac3_stereo_ext_proc_info }, { UAC3_PROCESS_MULTI_FUNCTION, "Multi-Function", undefined_proc_info }, { 0 }, }; /* * predefined data for extension units */ static const struct procunit_value_info clock_rate_xu_info[] = { { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 }, { 0 } }; static const struct procunit_value_info clock_source_xu_info[] = { { USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_value_info spdif_format_xu_info[] = { { USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_value_info soft_limit_xu_info[] = { { USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_info extunits[] = { { USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info }, { USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info }, { USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info }, { USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info }, { 0 } }; /* * build a processing/extension unit */ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw_desc, const struct procunit_info *list, bool extension_unit) { struct uac_processing_unit_descriptor *desc = raw_desc; int num_ins; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int i, err, nameid, type, len, val; const struct procunit_info *info; const struct procunit_value_info *valinfo; const struct usbmix_name_map *map; static const struct procunit_value_info default_value_info[] = { { 0x01, "Switch", USB_MIXER_BOOLEAN }, { 0 } }; static const struct procunit_info default_info = { 0, NULL, default_value_info }; const char *name = 
extension_unit ? "Extension Unit" : "Processing Unit"; num_ins = desc->bNrInPins; for (i = 0; i < num_ins; i++) { err = parse_audio_unit(state, desc->baSourceID[i]); if (err < 0) return err; } type = le16_to_cpu(desc->wProcessType); for (info = list; info && info->type; info++) if (info->type == type) break; if (!info || !info->type) info = &default_info; for (valinfo = info->values; valinfo->control; valinfo++) { __u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol); if (state->mixer->protocol == UAC_VERSION_1) { if (!(controls[valinfo->control / 8] & BIT((valinfo->control % 8) - 1))) continue; } else { /* UAC_VERSION_2/3 */ if (!uac_v2v3_control_is_readable(controls[valinfo->control / 8], valinfo->control)) continue; } map = find_map(state->map, unitid, valinfo->control); if (check_ignored_ctl(map)) continue; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return -ENOMEM; snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); cval->control = valinfo->control; cval->val_type = valinfo->val_type; cval->channels = 1; if (state->mixer->protocol > UAC_VERSION_1 && !uac_v2v3_control_is_writeable(controls[valinfo->control / 8], valinfo->control)) cval->master_readonly = 1; /* get min/max values */ switch (type) { case UAC_PROCESS_UP_DOWNMIX: { bool mode_sel = false; switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: if (cval->control == UAC_UD_MODE_SELECT) mode_sel = true; break; case UAC_VERSION_3: if (cval->control == UAC3_UD_MODE_SELECT) mode_sel = true; break; } if (mode_sel) { __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol); cval->min = 1; cval->max = control_spec[0]; cval->res = 1; cval->initialized = 1; break; } get_min_max(cval, valinfo->min_value); break; } case USB_XU_CLOCK_RATE: /* * E-Mu USB 0404/0202/TrackerPre/0204 * samplerate control quirk */ cval->min = 0; cval->max = 5; cval->res = 1; cval->initialized = 1; break; default: get_min_max(cval, valinfo->min_value); break; } err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { usb_mixer_elem_info_free(cval); return -EINVAL; } kctl = snd_ctl_new1(&mixer_procunit_ctl, cval); if (!kctl) { usb_mixer_elem_info_free(cval); return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) { /* nothing */ ; } else if (info->name) { strscpy(kctl->id.name, info->name, sizeof(kctl->id.name)); } else { if (extension_unit) nameid = uac_extension_unit_iExtension(desc, state->mixer->protocol); else nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol); len = 0; if (nameid) len = snd_usb_copy_string_desc(state->chip, nameid, kctl->id.name, sizeof(kctl->id.name)); if (!len) strscpy(kctl->id.name, name, sizeof(kctl->id.name)); } append_ctl_name(kctl, " "); append_ctl_name(kctl, valinfo->suffix); usb_audio_dbg(state->chip, "[%d] PU [%s] ch = %d, val = %d/%d\n", cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max); err = snd_usb_mixer_add_control(&cval->head, kctl); if (err < 0) return err; } return 0; } static int parse_audio_processing_unit(struct mixer_build *state, int unitid, void *raw_desc) { switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return build_audio_procunit(state, unitid, raw_desc, procunits, false); case UAC_VERSION_3: return build_audio_procunit(state, unitid, raw_desc, uac3_procunits, false); } } static int parse_audio_extension_unit(struct mixer_build *state, int unitid, void 
*raw_desc) { /* * Note that we parse extension units with processing unit descriptors. * That's ok as the layout is the same. */ return build_audio_procunit(state, unitid, raw_desc, extunits, true); } /* * Selector Unit */ /* * info callback for selector unit * use an enumerator type for routing */ static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); const char **itemlist = (const char **)kcontrol->private_value; if (snd_BUG_ON(!itemlist)) return -EINVAL; return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist); } /* get callback for selector unit */ static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int val, err; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { ucontrol->value.enumerated.item[0] = 0; return filter_error(cval, err); } val = get_relative_value(cval, val); ucontrol->value.enumerated.item[0] = val; return 0; } /* put callback for selector unit */ static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = snd_kcontrol_chip(kcontrol); int val, oval, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) return filter_error(cval, err); val = ucontrol->value.enumerated.item[0]; if (val < 0 || val >= cval->max) /* here cval->max = # elements */ return -EINVAL; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for selector unit */ static const struct snd_kcontrol_new mixer_selectunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_selector_info, .get = mixer_ctl_selector_get, .put = mixer_ctl_selector_put, }; /* * private free callback. * free both private_data and private_value */ static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl) { int i, num_ins = 0; if (kctl->private_data) { struct usb_mixer_elem_info *cval = kctl->private_data; num_ins = cval->max; usb_mixer_elem_info_free(cval); kctl->private_data = NULL; } if (kctl->private_value) { char **itemlist = (char **)kctl->private_value; for (i = 0; i < num_ins; i++) kfree(itemlist[i]); kfree(itemlist); kctl->private_value = 0; } } /* * parse a selector unit */ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_selector_unit_descriptor *desc = raw_desc; unsigned int i, nameid, len; int err; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; const struct usbmix_name_map *map; char **namelist; for (i = 0; i < desc->bNrInPins; i++) { err = parse_audio_unit(state, desc->baSourceID[i]); if (err < 0) return err; } if (desc->bNrInPins == 1) /* only one ? nonsense! 
*/ return 0; map = find_map(state->map, unitid, 0); if (check_ignored_ctl(map)) return 0; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) return -ENOMEM; snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); cval->val_type = USB_MIXER_U8; cval->channels = 1; cval->min = 1; cval->max = desc->bNrInPins; cval->res = 1; cval->initialized = 1; switch (state->mixer->protocol) { case UAC_VERSION_1: default: cval->control = 0; break; case UAC_VERSION_2: case UAC_VERSION_3: if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR || desc->bDescriptorSubtype == UAC3_CLOCK_SELECTOR) cval->control = UAC2_CX_CLOCK_SELECTOR; else /* UAC2/3_SELECTOR_UNIT */ cval->control = UAC2_SU_SELECTOR; break; } namelist = kcalloc(desc->bNrInPins, sizeof(char *), GFP_KERNEL); if (!namelist) { err = -ENOMEM; goto error_cval; } #define MAX_ITEM_NAME_LEN 64 for (i = 0; i < desc->bNrInPins; i++) { struct usb_audio_term iterm; namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL); if (!namelist[i]) { err = -ENOMEM; goto error_name; } len = check_mapped_selector_name(state, unitid, i, namelist[i], MAX_ITEM_NAME_LEN); if (! len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0) len = get_term_name(state->chip, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0); if (! len) sprintf(namelist[i], "Input %u", i); } kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval); if (! kctl) { usb_audio_err(state->chip, "cannot malloc kcontrol\n"); err = -ENOMEM; goto error_name; } kctl->private_value = (unsigned long)namelist; kctl->private_free = usb_mixer_selector_elem_free; /* check the static mapping table at first */ len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (!len) { /* no mapping ? */ switch (state->mixer->protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: /* if iSelector is given, use it */ nameid = uac_selector_unit_iSelector(desc); if (nameid) len = snd_usb_copy_string_desc(state->chip, nameid, kctl->id.name, sizeof(kctl->id.name)); break; case UAC_VERSION_3: /* TODO: Class-Specific strings not yet supported */ break; } /* ... or pick up the terminal name at next */ if (!len) len = get_term_name(state->chip, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 0); /* ... 
or use the fixed string "USB" as the last resort */ if (!len) strscpy(kctl->id.name, "USB", sizeof(kctl->id.name)); /* and add the proper suffix */ if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR || desc->bDescriptorSubtype == UAC3_CLOCK_SELECTOR) append_ctl_name(kctl, " Clock Source"); else if ((state->oterm.type & 0xff00) == 0x0100) append_ctl_name(kctl, " Capture Source"); else append_ctl_name(kctl, " Playback Source"); } usb_audio_dbg(state->chip, "[%d] SU [%s] items = %d\n", cval->head.id, kctl->id.name, desc->bNrInPins); return snd_usb_mixer_add_control(&cval->head, kctl); error_name: for (i = 0; i < desc->bNrInPins; i++) kfree(namelist[i]); kfree(namelist); error_cval: usb_mixer_elem_info_free(cval); return err; } /* * parse an audio unit recursively */ static int parse_audio_unit(struct mixer_build *state, int unitid) { unsigned char *p1; int protocol = state->mixer->protocol; if (test_and_set_bit(unitid, state->unitbitmap)) return 0; /* the unit already visited */ p1 = find_audio_control_unit(state, unitid); if (!p1) { usb_audio_err(state->chip, "unit %d not found!\n", unitid); return -EINVAL; } if (!snd_usb_validate_audio_desc(p1, protocol)) { usb_audio_dbg(state->chip, "invalid unit %d\n", unitid); return 0; /* skip invalid unit */ } switch (PTYPE(protocol, p1[2])) { case PTYPE(UAC_VERSION_1, UAC_INPUT_TERMINAL): case PTYPE(UAC_VERSION_2, UAC_INPUT_TERMINAL): case PTYPE(UAC_VERSION_3, UAC_INPUT_TERMINAL): return parse_audio_input_terminal(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC_MIXER_UNIT): case PTYPE(UAC_VERSION_2, UAC_MIXER_UNIT): case PTYPE(UAC_VERSION_3, UAC3_MIXER_UNIT): return parse_audio_mixer_unit(state, unitid, p1); case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SOURCE): case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SOURCE): return parse_clock_source_unit(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC_SELECTOR_UNIT): case PTYPE(UAC_VERSION_2, UAC_SELECTOR_UNIT): case PTYPE(UAC_VERSION_3, UAC3_SELECTOR_UNIT): case PTYPE(UAC_VERSION_2, UAC2_CLOCK_SELECTOR): case PTYPE(UAC_VERSION_3, UAC3_CLOCK_SELECTOR): return parse_audio_selector_unit(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC_FEATURE_UNIT): case PTYPE(UAC_VERSION_2, UAC_FEATURE_UNIT): case PTYPE(UAC_VERSION_3, UAC3_FEATURE_UNIT): return parse_audio_feature_unit(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC1_PROCESSING_UNIT): case PTYPE(UAC_VERSION_2, UAC2_PROCESSING_UNIT_V2): case PTYPE(UAC_VERSION_3, UAC3_PROCESSING_UNIT): return parse_audio_processing_unit(state, unitid, p1); case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT): case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2): case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT): return parse_audio_extension_unit(state, unitid, p1); case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT): case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT): return 0; /* FIXME - effect units not implemented yet */ default: usb_audio_err(state->chip, "unit %u: unexpected type 0x%02x\n", unitid, p1[2]); return -EINVAL; } } static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) { /* kill pending URBs */ snd_usb_mixer_disconnect(mixer); kfree(mixer->id_elems); if (mixer->urb) { kfree(mixer->urb->transfer_buffer); usb_free_urb(mixer->urb); } usb_free_urb(mixer->rc_urb); kfree(mixer->rc_setup_packet); kfree(mixer); } static int snd_usb_mixer_dev_free(struct snd_device *device) { struct usb_mixer_interface *mixer = device->device_data; snd_usb_mixer_free(mixer); return 0; } /* UAC3 predefined channels configuration */ struct uac3_badd_profile { int subclass; const char *name; int c_chmask; /* capture 
channels mask */ int p_chmask; /* playback channels mask */ int st_chmask; /* side tone mixing channel mask */ }; static const struct uac3_badd_profile uac3_badd_profiles[] = { { /* * BAIF, BAOF or combination of both * IN: Mono or Stereo cfg, Mono alt possible * OUT: Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_GENERIC_IO, .name = "GENERIC IO", .c_chmask = -1, /* dynamic channels */ .p_chmask = -1, /* dynamic channels */ }, { /* BAOF; Stereo only cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_HEADPHONE, .name = "HEADPHONE", .p_chmask = 3, }, { /* BAOF; Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_SPEAKER, .name = "SPEAKER", .p_chmask = -1, /* dynamic channels */ }, { /* BAIF; Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_MICROPHONE, .name = "MICROPHONE", .c_chmask = -1, /* dynamic channels */ }, { /* * BAIOF topology * IN: Mono only * OUT: Mono or Stereo cfg, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_HEADSET, .name = "HEADSET", .c_chmask = 1, .p_chmask = -1, /* dynamic channels */ .st_chmask = 1, }, { /* BAIOF; IN: Mono only; OUT: Stereo only, Mono alt possible */ .subclass = UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER, .name = "HEADSET ADAPTER", .c_chmask = 1, .p_chmask = 3, .st_chmask = 1, }, { /* BAIF + BAOF; IN: Mono only; OUT: Mono only */ .subclass = UAC3_FUNCTION_SUBCLASS_SPEAKERPHONE, .name = "SPEAKERPHONE", .c_chmask = 1, .p_chmask = 1, }, { 0 } /* terminator */ }; static bool uac3_badd_func_has_valid_channels(struct usb_mixer_interface *mixer, const struct uac3_badd_profile *f, int c_chmask, int p_chmask) { /* * If both playback/capture channels are dynamic, make sure * at least one channel is present */ if (f->c_chmask < 0 && f->p_chmask < 0) { if (!c_chmask && !p_chmask) { usb_audio_warn(mixer->chip, "BAAD %s: no channels?", f->name); return false; } return true; } if ((f->c_chmask < 0 && !c_chmask) || (f->c_chmask >= 0 && f->c_chmask != c_chmask)) { usb_audio_warn(mixer->chip, "BAAD %s c_chmask mismatch", f->name); return false; } if ((f->p_chmask < 0 && !p_chmask) || (f->p_chmask >= 0 && f->p_chmask != p_chmask)) { usb_audio_warn(mixer->chip, "BAAD %s p_chmask mismatch", f->name); return false; } return true; } /* * create mixer controls for UAC3 BADD profiles * * UAC3 BADD device doesn't contain CS descriptors thus we will guess everything * * BADD device may contain Mixer Unit, which doesn't have any controls, skip it */ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, int ctrlif) { struct usb_device *dev = mixer->chip->dev; struct usb_interface_assoc_descriptor *assoc; int badd_profile = mixer->chip->badd_profile; const struct uac3_badd_profile *f; const struct usbmix_ctl_map *map; int p_chmask = 0, c_chmask = 0, st_chmask = 0; int i; assoc = usb_ifnum_to_if(dev, ctrlif)->intf_assoc; /* Detect BADD capture/playback channels from AS EP descriptors */ for (i = 0; i < assoc->bInterfaceCount; i++) { int intf = assoc->bFirstInterface + i; struct usb_interface *iface; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; unsigned int maxpacksize; char dir_in; int chmask, num; if (intf == ctrlif) continue; iface = usb_ifnum_to_if(dev, intf); if (!iface) continue; num = iface->num_altsetting; if (num < 2) return -EINVAL; /* * The number of Channels in an AudioStreaming interface * and the audio sample bit resolution (16 bits or 24 * bits) can be derived from the wMaxPacketSize field in * the Standard AS Audio Data Endpoint 
descriptor in * Alternate Setting 1 */ alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); if (altsd->bNumEndpoints < 1) return -EINVAL; /* check direction */ dir_in = (get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN); maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); switch (maxpacksize) { default: usb_audio_err(mixer->chip, "incorrect wMaxPacketSize 0x%x for BADD profile\n", maxpacksize); return -EINVAL; case UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_16: case UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_16: case UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_24: case UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_24: chmask = 1; break; case UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_16: case UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_16: case UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_24: case UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_24: chmask = 3; break; } if (dir_in) c_chmask = chmask; else p_chmask = chmask; } usb_audio_dbg(mixer->chip, "UAC3 BADD profile 0x%x: detected c_chmask=%d p_chmask=%d\n", badd_profile, c_chmask, p_chmask); /* check the mapping table */ for (map = uac3_badd_usbmix_ctl_maps; map->id; map++) { if (map->id == badd_profile) break; } if (!map->id) return -EINVAL; for (f = uac3_badd_profiles; f->name; f++) { if (badd_profile == f->subclass) break; } if (!f->name) return -EINVAL; if (!uac3_badd_func_has_valid_channels(mixer, f, c_chmask, p_chmask)) return -EINVAL; st_chmask = f->st_chmask; /* Playback */ if (p_chmask) { /* Master channel, always writable */ build_feature_ctl_badd(mixer, 0, UAC_FU_MUTE, UAC3_BADD_FU_ID2, map->map); /* Mono/Stereo volume channels, always writable */ build_feature_ctl_badd(mixer, p_chmask, UAC_FU_VOLUME, UAC3_BADD_FU_ID2, map->map); } /* Capture */ if (c_chmask) { /* Master channel, always writable */ build_feature_ctl_badd(mixer, 0, UAC_FU_MUTE, UAC3_BADD_FU_ID5, map->map); /* Mono/Stereo volume channels, always writable */ build_feature_ctl_badd(mixer, c_chmask, UAC_FU_VOLUME, UAC3_BADD_FU_ID5, map->map); } /* Side tone-mixing */ if (st_chmask) { /* Master channel, always writable */ build_feature_ctl_badd(mixer, 0, UAC_FU_MUTE, UAC3_BADD_FU_ID7, map->map); /* Mono volume channel, always writable */ build_feature_ctl_badd(mixer, 1, UAC_FU_VOLUME, UAC3_BADD_FU_ID7, map->map); } /* Insertion Control */ if (f->subclass == UAC3_FUNCTION_SUBCLASS_HEADSET_ADAPTER) { struct usb_audio_term iterm, oterm; /* Input Term - Insertion control */ memset(&iterm, 0, sizeof(iterm)); iterm.id = UAC3_BADD_IT_ID4; iterm.type = UAC_BIDIR_TERMINAL_HEADSET; build_connector_control(mixer, map->map, &iterm, true); /* Output Term - Insertion control */ memset(&oterm, 0, sizeof(oterm)); oterm.id = UAC3_BADD_OT_ID3; oterm.type = UAC_BIDIR_TERMINAL_HEADSET; build_connector_control(mixer, map->map, &oterm, false); } return 0; } /* * create mixer controls * * walk through all UAC_OUTPUT_TERMINAL descriptors to search for mixers */ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) { struct mixer_build state; int err; const struct usbmix_ctl_map *map; void *p; memset(&state, 0, sizeof(state)); state.chip = mixer->chip; state.mixer = mixer; state.buffer = mixer->hostif->extra; state.buflen = mixer->hostif->extralen; /* check the mapping table */ for (map = usbmix_ctl_maps; map->id; map++) { if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; mixer->connector_map = map->connector_map; break; } } p = NULL; while ((p = snd_usb_find_csint_desc(mixer->hostif->extra, mixer->hostif->extralen, p, UAC_OUTPUT_TERMINAL)) != NULL) { if (!snd_usb_validate_audio_desc(p, 
mixer->protocol)) continue; /* skip invalid descriptor */ if (mixer->protocol == UAC_VERSION_1) { struct uac1_output_terminal_descriptor *desc = p; /* mark terminal ID as visited */ set_bit(desc->bTerminalID, state.unitbitmap); state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; } else if (mixer->protocol == UAC_VERSION_2) { struct uac2_output_terminal_descriptor *desc = p; /* mark terminal ID as visited */ set_bit(desc->bTerminalID, state.unitbitmap); state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; /* * For UAC2, use the same approach to also add the * clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); if (err < 0 && err != -EINVAL) return err; if ((state.oterm.type & 0xff00) != 0x0100 && uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), UAC2_TE_CONNECTOR)) { build_connector_control(state.mixer, state.map, &state.oterm, false); } } else { /* UAC_VERSION_3 */ struct uac3_output_terminal_descriptor *desc = p; /* mark terminal ID as visited */ set_bit(desc->bTerminalID, state.unitbitmap); state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = le16_to_cpu(desc->wTerminalDescrStr); err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; /* * For UAC3, use the same approach to also add the * clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); if (err < 0 && err != -EINVAL) return err; if ((state.oterm.type & 0xff00) != 0x0100 && uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls), UAC3_TE_INSERTION)) { build_connector_control(state.mixer, state.map, &state.oterm, false); } } } return 0; } static int delegate_notify(struct usb_mixer_interface *mixer, int unitid, u8 *control, u8 *channel) { const struct usbmix_connector_map *map = mixer->connector_map; if (!map) return unitid; for (; map->id; map++) { if (map->id == unitid) { if (control && map->control) *control = map->control; if (channel && map->channel) *channel = map->channel; return map->delegated_id; } } return unitid; } void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_list *list; unitid = delegate_notify(mixer, unitid, NULL, NULL); for_each_mixer_elem(list, mixer, unitid) { struct usb_mixer_elem_info *info; if (!list->is_std_info) continue; info = mixer_elem_list_to_info(list); /* invalidate cache, so the value is read from the device */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &list->kctl->id); } } static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, struct usb_mixer_elem_list *list) { struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); static const char * const val_types[] = { [USB_MIXER_BOOLEAN] = "BOOLEAN", [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN", [USB_MIXER_S8] = "S8", [USB_MIXER_U8] = "U8", [USB_MIXER_S16] = "S16", [USB_MIXER_U16] = "U16", [USB_MIXER_S32] = "S32", [USB_MIXER_U32] = "U32", [USB_MIXER_BESPOKEN] = "BESPOKEN", }; snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " "channels=%i, type=\"%s\"\n", cval->head.id, cval->control, cval->cmask, cval->channels, val_types[cval->val_type]); snd_iprintf(buffer, " Volume: min=%i, max=%i, 
dBmin=%i, dBmax=%i\n", cval->min, cval->max, cval->dBmin, cval->dBmax); } static void snd_usb_mixer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; struct usb_mixer_interface *mixer; struct usb_mixer_elem_list *list; int unitid; list_for_each_entry(mixer, &chip->mixer_list, list) { snd_iprintf(buffer, "USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n", chip->usb_id, mixer_ctrl_intf(mixer), mixer->ignore_ctl_error); snd_iprintf(buffer, "Card: %s\n", chip->card->longname); for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) { for_each_mixer_elem(list, mixer, unitid) { snd_iprintf(buffer, " Unit: %i\n", list->id); if (list->kctl) snd_iprintf(buffer, " Control: name=\"%s\", index=%i\n", list->kctl->id.name, list->kctl->id.index); if (list->dump) list->dump(buffer, list); } } } } static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, int attribute, int value, int index) { struct usb_mixer_elem_list *list; __u8 unitid = (index >> 8) & 0xff; __u8 control = (value >> 8) & 0xff; __u8 channel = value & 0xff; unsigned int count = 0; if (channel >= MAX_CHANNELS) { usb_audio_dbg(mixer->chip, "%s(): bogus channel number %d\n", __func__, channel); return; } unitid = delegate_notify(mixer, unitid, &control, &channel); for_each_mixer_elem(list, mixer, unitid) count++; if (count == 0) return; for_each_mixer_elem(list, mixer, unitid) { struct usb_mixer_elem_info *info; if (!list->kctl) continue; if (!list->is_std_info) continue; info = mixer_elem_list_to_info(list); if (count > 1 && info->control != control) continue; switch (attribute) { case UAC2_CS_CUR: /* invalidate cache, so the value is read from the device */ if (channel) info->cached &= ~BIT(channel); else /* master channel */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &info->head.kctl->id); break; case UAC2_CS_RANGE: /* TODO */ break; case UAC2_CS_MEM: /* TODO */ break; default: usb_audio_dbg(mixer->chip, "unknown attribute %d in interrupt\n", attribute); break; } /* switch */ } } static void snd_usb_mixer_interrupt(struct urb *urb) { struct usb_mixer_interface *mixer = urb->context; int len = urb->actual_length; int ustatus = urb->status; if (ustatus != 0) goto requeue; if (mixer->protocol == UAC_VERSION_1) { struct uac1_status_word *status; for (status = urb->transfer_buffer; len >= sizeof(*status); len -= sizeof(*status), status++) { dev_dbg(&urb->dev->dev, "status interrupt: %02x %02x\n", status->bStatusType, status->bOriginator); /* ignore any notifications not from the control interface */ if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) != UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF) continue; if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED) snd_usb_mixer_rc_memory_change(mixer, status->bOriginator); else snd_usb_mixer_notify_id(mixer, status->bOriginator); } } else { /* UAC_VERSION_2 */ struct uac2_interrupt_data_msg *msg; for (msg = urb->transfer_buffer; len >= sizeof(*msg); len -= sizeof(*msg), msg++) { /* drop vendor specific and endpoint requests */ if ((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) || (msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP)) continue; snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute, le16_to_cpu(msg->wValue), le16_to_cpu(msg->wIndex)); } } requeue: if (ustatus != -ENOENT && ustatus != -ECONNRESET && ustatus != -ESHUTDOWN) { urb->dev = mixer->chip->dev; usb_submit_urb(urb, GFP_ATOMIC); } } /* create the handler for the optional status interrupt endpoint */ static int 
snd_usb_mixer_status_create(struct usb_mixer_interface *mixer) { struct usb_endpoint_descriptor *ep; void *transfer_buffer; int buffer_length; unsigned int epnum; /* we need one interrupt input endpoint */ if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1) return 0; ep = get_endpoint(mixer->hostif, 0); if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep)) return 0; epnum = usb_endpoint_num(ep); buffer_length = le16_to_cpu(ep->wMaxPacketSize); transfer_buffer = kmalloc(buffer_length, GFP_KERNEL); if (!transfer_buffer) return -ENOMEM; mixer->urb = usb_alloc_urb(0, GFP_KERNEL); if (!mixer->urb) { kfree(transfer_buffer); return -ENOMEM; } usb_fill_int_urb(mixer->urb, mixer->chip->dev, usb_rcvintpipe(mixer->chip->dev, epnum), transfer_buffer, buffer_length, snd_usb_mixer_interrupt, mixer, ep->bInterval); usb_submit_urb(mixer->urb, GFP_KERNEL); return 0; } int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif) { static const struct snd_device_ops dev_ops = { .dev_free = snd_usb_mixer_dev_free }; struct usb_mixer_interface *mixer; int err; strcpy(chip->card->mixername, "USB Mixer"); mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); if (!mixer) return -ENOMEM; mixer->chip = chip; mixer->ignore_ctl_error = !!(chip->quirk_flags & QUIRK_FLAG_IGNORE_CTL_ERROR); mixer->id_elems = kcalloc(MAX_ID_ELEMS, sizeof(*mixer->id_elems), GFP_KERNEL); if (!mixer->id_elems) { kfree(mixer); return -ENOMEM; } mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) { case UAC_VERSION_1: default: mixer->protocol = UAC_VERSION_1; break; case UAC_VERSION_2: mixer->protocol = UAC_VERSION_2; break; case UAC_VERSION_3: mixer->protocol = UAC_VERSION_3; break; } if (mixer->protocol == UAC_VERSION_3 && chip->badd_profile >= UAC3_FUNCTION_SUBCLASS_GENERIC_IO) { err = snd_usb_mixer_controls_badd(mixer, ctrlif); if (err < 0) goto _error; } else { err = snd_usb_mixer_controls(mixer); if (err < 0) goto _error; } err = snd_usb_mixer_status_create(mixer); if (err < 0) goto _error; err = snd_usb_mixer_apply_create_quirk(mixer); if (err < 0) goto _error; err = snd_device_new(chip->card, SNDRV_DEV_CODEC, mixer, &dev_ops); if (err < 0) goto _error; if (list_empty(&chip->mixer_list)) snd_card_ro_proc_new(chip->card, "usbmixer", chip, snd_usb_mixer_proc_read); list_add(&mixer->list, &chip->mixer_list); return 0; _error: snd_usb_mixer_free(mixer); return err; } void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) { if (mixer->disconnected) return; if (mixer->urb) usb_kill_urb(mixer->urb); if (mixer->rc_urb) usb_kill_urb(mixer->rc_urb); if (mixer->private_free) mixer->private_free(mixer); mixer->disconnected = true; } /* stop any bus activity of a mixer */ static void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer) { usb_kill_urb(mixer->urb); usb_kill_urb(mixer->rc_urb); } static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer) { int err; if (mixer->urb) { err = usb_submit_urb(mixer->urb, GFP_NOIO); if (err < 0) return err; } return 0; } int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer) { snd_usb_mixer_inactivate(mixer); if (mixer->private_suspend) mixer->private_suspend(mixer); return 0; } static int restore_mixer_value(struct usb_mixer_elem_list *list) { struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); int c, err, idx; if (cval->val_type == USB_MIXER_BESPOKEN) return 0; if (cval->cmask) { idx = 0; for (c = 0; c < MAX_CHANNELS; c++) { if (!(cval->cmask & BIT(c))) continue; if (cval->cached & 
BIT(c + 1)) { err = snd_usb_set_cur_mix_value(cval, c + 1, idx, cval->cache_val[idx]); if (err < 0) break; } idx++; } } else { /* master */ if (cval->cached) snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val); } return 0; } int snd_usb_mixer_resume(struct usb_mixer_interface *mixer) { struct usb_mixer_elem_list *list; int id, err; /* restore cached mixer values */ for (id = 0; id < MAX_ID_ELEMS; id++) { for_each_mixer_elem(list, mixer, id) { if (list->resume) { err = list->resume(list); if (err < 0) return err; } } } snd_usb_mixer_resume_quirk(mixer); return snd_usb_mixer_activate(mixer); } void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, int unitid) { list->mixer = mixer; list->id = unitid; list->dump = snd_usb_mixer_dump_cval; list->resume = restore_mixer_value; } |
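/*
 * Editor's note: the sketch below is not part of the original file.  It only
 * illustrates the shape of the connector-map table that delegate_notify()
 * above walks: entries are matched on ->id, the notification is redirected to
 * ->delegated_id, and non-zero ->control / ->channel values override the
 * values decoded from the interrupt message.  The numeric ids are
 * hypothetical.
 */
#if 0	/* illustrative only */
static const struct usbmix_connector_map example_connector_map[] = {
	{
		.id		= 3,	/* unit id reported by the device */
		.delegated_id	= 5,	/* unit id that owns the kcontrol */
		.control	= 0,	/* 0: keep the control from the event */
		.channel	= 0,	/* 0: keep the channel from the event */
	},
	{ }	/* map->id == 0 terminates the walk in delegate_notify() */
};
#endif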
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz> * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com> * * USB/RS232 I-Force joysticks and wheels. */ #include <linux/usb.h> #include "iforce.h" struct iforce_usb { struct iforce iforce; struct usb_device *usbdev; struct usb_interface *intf; struct urb *irq, *out; u8 data_in[IFORCE_MAX_LENGTH] ____cacheline_aligned; u8 data_out[IFORCE_MAX_LENGTH] ____cacheline_aligned; }; static void __iforce_usb_xmit(struct iforce *iforce) { struct iforce_usb *iforce_usb = container_of(iforce, struct iforce_usb, iforce); int n, c; guard(spinlock_irqsave)(&iforce->xmit_lock); if (iforce->xmit.head == iforce->xmit.tail) { iforce_clear_xmit_and_wake(iforce); return; } ((char *)iforce_usb->out->transfer_buffer)[0] = iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); n = iforce->xmit.buf[iforce->xmit.tail]; XMIT_INC(iforce->xmit.tail, 1); iforce_usb->out->transfer_buffer_length = n + 1; iforce_usb->out->dev = iforce_usb->usbdev; /* Copy rest of data then */ c = CIRC_CNT_TO_END(iforce->xmit.head, iforce->xmit.tail, XMIT_SIZE); if (n < c) c = n; memcpy(iforce_usb->out->transfer_buffer + 1, &iforce->xmit.buf[iforce->xmit.tail], c); if (n != c) { memcpy(iforce_usb->out->transfer_buffer + 1 + c, &iforce->xmit.buf[0], n - c); } XMIT_INC(iforce->xmit.tail, n); n = usb_submit_urb(iforce_usb->out, GFP_ATOMIC); if (n) { dev_warn(&iforce_usb->intf->dev, "usb_submit_urb failed %d\n", n); iforce_clear_xmit_and_wake(iforce); } /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
* As long as the urb completion handler is not called, the transmitting * is considered to be running */ } static void iforce_usb_xmit(struct iforce *iforce) { if (!test_and_set_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags)) __iforce_usb_xmit(iforce); } static int iforce_usb_get_id(struct iforce *iforce, u8 id, u8 *response_data, size_t *response_len) { struct iforce_usb *iforce_usb = container_of(iforce, struct iforce_usb, iforce); u8 *buf; int status; buf = kmalloc(IFORCE_MAX_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; status = usb_control_msg(iforce_usb->usbdev, usb_rcvctrlpipe(iforce_usb->usbdev, 0), id, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE, 0, 0, buf, IFORCE_MAX_LENGTH, 1000); if (status < 0) { dev_err(&iforce_usb->intf->dev, "usb_control_msg failed: %d\n", status); } else if (buf[0] != id) { status = -EIO; } else { memcpy(response_data, buf, status); *response_len = status; status = 0; } kfree(buf); return status; } static int iforce_usb_start_io(struct iforce *iforce) { struct iforce_usb *iforce_usb = container_of(iforce, struct iforce_usb, iforce); if (usb_submit_urb(iforce_usb->irq, GFP_KERNEL)) return -EIO; return 0; } static void iforce_usb_stop_io(struct iforce *iforce) { struct iforce_usb *iforce_usb = container_of(iforce, struct iforce_usb, iforce); usb_kill_urb(iforce_usb->irq); usb_kill_urb(iforce_usb->out); } static const struct iforce_xport_ops iforce_usb_xport_ops = { .xmit = iforce_usb_xmit, .get_id = iforce_usb_get_id, .start_io = iforce_usb_start_io, .stop_io = iforce_usb_stop_io, }; static void iforce_usb_irq(struct urb *urb) { struct iforce_usb *iforce_usb = urb->context; struct iforce *iforce = &iforce_usb->iforce; struct device *dev = &iforce_usb->intf->dev; int status; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(dev, "%s - urb has status of: %d\n", __func__, urb->status); goto exit; } iforce_process_packet(iforce, iforce_usb->data_in[0], iforce_usb->data_in + 1, urb->actual_length - 1); exit: status = usb_submit_urb(urb, GFP_ATOMIC); if (status) dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, status); } static void iforce_usb_out(struct urb *urb) { struct iforce_usb *iforce_usb = urb->context; struct iforce *iforce = &iforce_usb->iforce; if (urb->status) { dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n", urb->status); iforce_clear_xmit_and_wake(iforce); return; } __iforce_usb_xmit(iforce); wake_up_all(&iforce->wait); } static int iforce_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_host_interface *interface; struct usb_endpoint_descriptor *epirq, *epout; struct iforce_usb *iforce_usb; int err = -ENOMEM; interface = intf->cur_altsetting; if (interface->desc.bNumEndpoints < 2) return -ENODEV; epirq = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(epirq)) return -ENODEV; epout = &interface->endpoint[1].desc; if (!usb_endpoint_is_int_out(epout)) return -ENODEV; iforce_usb = kzalloc(sizeof(*iforce_usb), GFP_KERNEL); if (!iforce_usb) goto fail; iforce_usb->irq = usb_alloc_urb(0, GFP_KERNEL); if (!iforce_usb->irq) goto fail; iforce_usb->out = usb_alloc_urb(0, GFP_KERNEL); if (!iforce_usb->out) goto fail; iforce_usb->iforce.xport_ops = &iforce_usb_xport_ops; iforce_usb->usbdev = dev; iforce_usb->intf = intf;
usb_fill_int_urb(iforce_usb->irq, dev, usb_rcvintpipe(dev, epirq->bEndpointAddress), iforce_usb->data_in, sizeof(iforce_usb->data_in), iforce_usb_irq, iforce_usb, epirq->bInterval); usb_fill_int_urb(iforce_usb->out, dev, usb_sndintpipe(dev, epout->bEndpointAddress), iforce_usb->data_out, sizeof(iforce_usb->data_out), iforce_usb_out, iforce_usb, epout->bInterval); err = iforce_init_device(&intf->dev, BUS_USB, &iforce_usb->iforce); if (err) goto fail; usb_set_intfdata(intf, iforce_usb); return 0; fail: if (iforce_usb) { usb_free_urb(iforce_usb->irq); usb_free_urb(iforce_usb->out); kfree(iforce_usb); } return err; } static void iforce_usb_disconnect(struct usb_interface *intf) { struct iforce_usb *iforce_usb = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); input_unregister_device(iforce_usb->iforce.dev); usb_free_urb(iforce_usb->irq); usb_free_urb(iforce_usb->out); kfree(iforce_usb); } static const struct usb_device_id iforce_usb_ids[] = { { USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */ { USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */ { USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */ { USB_DEVICE(0x05ef, 0x020a) }, /* AVB Top Shot Pegasus */ { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */ { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */ { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */ { USB_DEVICE(0x06a3, 0xff04) }, /* Saitek R440 Force Wheel */ { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ { USB_DEVICE(0x06f8, 0x0003) }, /* Guillemot Jet Leader Force Feedback */ { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, iforce_usb_ids); struct usb_driver iforce_usb_driver = { .name = "iforce", .probe = iforce_usb_probe, .disconnect = iforce_usb_disconnect, .id_table = iforce_usb_ids, }; module_usb_driver(iforce_usb_driver); MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>, Johann Deneux <johann.deneux@gmail.com>"); MODULE_DESCRIPTION("USB I-Force joysticks and wheels driver"); MODULE_LICENSE("GPL"); |
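/*
 * Editor's note: the helper below is not part of the original driver.  It is
 * a stand-alone restatement of the wrap-around copy done in
 * __iforce_usb_xmit(): CIRC_CNT_TO_END() yields the number of bytes that can
 * be copied before the ring wraps, so at most two memcpy() calls are needed.
 * The function name is made up for illustration, and "size" must be a power
 * of two, as required by the CIRC_* macros.
 */
#if 0	/* illustrative only */
#include <linux/circ_buf.h>
#include <linux/string.h>
#include <linux/types.h>

static void example_copy_from_ring(u8 *dst, const u8 *ring,
				   int head, int tail, int size, int n)
{
	int c = CIRC_CNT_TO_END(head, tail, size);	/* bytes until wrap */

	if (n < c)
		c = n;

	memcpy(dst, &ring[tail], c);			/* contiguous part */
	if (n != c)
		memcpy(dst + c, &ring[0], n - c);	/* wrapped remainder */
}
#endif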
// SPDX-License-Identifier: GPL-2.0-only /* * proc/fs/generic.c --- generic routines for the proc-fs * * This file contains generic proc-fs routines for handling * directories and files. * * Copyright (C) 1991, 1992 Linus Torvalds. * Copyright (C) 1997 Theodore Ts'o */ #include <linux/cache.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/printk.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include "internal.h" static DEFINE_RWLOCK(proc_subdir_lock); struct kmem_cache *proc_dir_entry_cache __ro_after_init; void pde_free(struct proc_dir_entry *pde) { if (S_ISLNK(pde->mode)) kfree(pde->data); if (pde->name != pde->inline_name) kfree(pde->name); kmem_cache_free(proc_dir_entry_cache, pde); } static int proc_match(const char *name, struct proc_dir_entry *de, unsigned int len) { if (len < de->namelen) return -1; if (len > de->namelen) return 1; return memcmp(name, de->name, len); } static struct proc_dir_entry *pde_subdir_first(struct proc_dir_entry *dir) { return rb_entry_safe(rb_first(&dir->subdir), struct proc_dir_entry, subdir_node); } static struct proc_dir_entry *pde_subdir_next(struct proc_dir_entry *dir) { return rb_entry_safe(rb_next(&dir->subdir_node), struct proc_dir_entry, subdir_node); } static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir, const char *name, unsigned int len) { struct rb_node *node = dir->subdir.rb_node; while (node) { struct proc_dir_entry *de = rb_entry(node, struct proc_dir_entry, subdir_node); int result = proc_match(name, de, len); if (result < 0) node = node->rb_left; else if (result > 0) node = node->rb_right; else return de; } return NULL; } static bool pde_subdir_insert(struct proc_dir_entry *dir, struct proc_dir_entry *de) { struct rb_root *root = &dir->subdir; struct rb_node **new = &root->rb_node, *parent = NULL; /* Figure out where to put new node */ while (*new) { struct proc_dir_entry *this = rb_entry(*new, struct proc_dir_entry, subdir_node); int result = proc_match(de->name, this, de->namelen); parent = *new; if (result < 0) new = &(*new)->rb_left; else if (result > 0) new = &(*new)->rb_right; else return false; } /* Add new node and rebalance tree.
*/ rb_link_node(&de->subdir_node, parent, new); rb_insert_color(&de->subdir_node, root); return true; } static int proc_notify_change(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct inode *inode = d_inode(dentry); struct proc_dir_entry *de = PDE(inode); int error; error = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (error) return error; setattr_copy(&nop_mnt_idmap, inode, iattr); proc_set_user(de, inode->i_uid, inode->i_gid); de->mode = inode->i_mode; return 0; } static int proc_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct proc_dir_entry *de = PDE(inode); if (de) { nlink_t nlink = READ_ONCE(de->nlink); if (nlink > 0) { set_nlink(inode, nlink); } } generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); return 0; } static const struct inode_operations proc_file_inode_operations = { .setattr = proc_notify_change, }; /* * This function parses a name such as "tty/driver/serial", and * returns the struct proc_dir_entry for "/proc/tty/driver", and * returns "serial" in residual. */ static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret, const char **residual) { const char *cp = name, *next; struct proc_dir_entry *de; de = *ret ?: &proc_root; while ((next = strchr(cp, '/')) != NULL) { de = pde_subdir_find(de, cp, next - cp); if (!de) { WARN(1, "name '%s'\n", name); return -ENOENT; } cp = next + 1; } *residual = cp; *ret = de; return 0; } static int xlate_proc_name(const char *name, struct proc_dir_entry **ret, const char **residual) { int rv; read_lock(&proc_subdir_lock); rv = __xlate_proc_name(name, ret, residual); read_unlock(&proc_subdir_lock); return rv; } static DEFINE_IDA(proc_inum_ida); #define PROC_DYNAMIC_FIRST 0xF0000000U /* * Return an inode number between PROC_DYNAMIC_FIRST and * 0xffffffff, or zero on failure. */ int proc_alloc_inum(unsigned int *inum) { int i; i = ida_alloc_max(&proc_inum_ida, UINT_MAX - PROC_DYNAMIC_FIRST, GFP_KERNEL); if (i < 0) return i; *inum = PROC_DYNAMIC_FIRST + (unsigned int)i; return 0; } void proc_free_inum(unsigned int inum) { ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST); } static int proc_misc_d_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0) return 0; /* revalidate */ return 1; } static int proc_misc_d_delete(const struct dentry *dentry) { return atomic_read(&PDE(d_inode(dentry))->in_use) < 0; } static const struct dentry_operations proc_misc_dentry_ops = { .d_revalidate = proc_misc_d_revalidate, .d_delete = proc_misc_d_delete, }; /* * Don't create negative dentries here, return -ENOENT by hand * instead. 
*/ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, struct proc_dir_entry *de) { struct inode *inode; read_lock(&proc_subdir_lock); de = pde_subdir_find(de, dentry->d_name.name, dentry->d_name.len); if (de) { pde_get(de); read_unlock(&proc_subdir_lock); inode = proc_get_inode(dir->i_sb, de); if (!inode) return ERR_PTR(-ENOMEM); d_set_d_op(dentry, de->proc_dops); return d_splice_alias(inode, dentry); } read_unlock(&proc_subdir_lock); return ERR_PTR(-ENOENT); } struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct proc_fs_info *fs_info = proc_sb_info(dir->i_sb); if (fs_info->pidonly == PROC_PIDONLY_ON) return ERR_PTR(-ENOENT); return proc_lookup_de(dir, dentry, PDE(dir)); } /* * This returns non-zero if at EOF, so that the /proc * root directory can use this and check if it should * continue with the <pid> entries.. * * Note that the VFS-layer doesn't care about the return * value of the readdir() call, as long as it's non-negative * for success.. */ int proc_readdir_de(struct file *file, struct dir_context *ctx, struct proc_dir_entry *de) { int i; if (!dir_emit_dots(file, ctx)) return 0; i = ctx->pos - 2; read_lock(&proc_subdir_lock); de = pde_subdir_first(de); for (;;) { if (!de) { read_unlock(&proc_subdir_lock); return 0; } if (!i) break; de = pde_subdir_next(de); i--; } do { struct proc_dir_entry *next; pde_get(de); read_unlock(&proc_subdir_lock); if (!dir_emit(ctx, de->name, de->namelen, de->low_ino, de->mode >> 12)) { pde_put(de); return 0; } ctx->pos++; read_lock(&proc_subdir_lock); next = pde_subdir_next(de); pde_put(de); de = next; } while (de); read_unlock(&proc_subdir_lock); return 1; } int proc_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb); if (fs_info->pidonly == PROC_PIDONLY_ON) return 1; return proc_readdir_de(file, ctx, PDE(inode)); } /* * These are the generic /proc directory operations. They * use the in-memory "struct proc_dir_entry" tree to parse * the /proc directory. */ static const struct file_operations proc_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = proc_readdir, }; static int proc_net_d_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { return 0; } const struct dentry_operations proc_net_dentry_ops = { .d_revalidate = proc_net_d_revalidate, .d_delete = always_delete_dentry, }; /* * proc directories can do almost nothing.. 
*/ static const struct inode_operations proc_dir_inode_operations = { .lookup = proc_lookup, .getattr = proc_getattr, .setattr = proc_notify_change, }; /* returns the registered entry, or frees dp and returns NULL on failure */ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp) { if (proc_alloc_inum(&dp->low_ino)) goto out_free_entry; write_lock(&proc_subdir_lock); dp->parent = dir; if (pde_subdir_insert(dir, dp) == false) { WARN(1, "proc_dir_entry '%s/%s' already registered\n", dir->name, dp->name); write_unlock(&proc_subdir_lock); goto out_free_inum; } dir->nlink++; write_unlock(&proc_subdir_lock); return dp; out_free_inum: proc_free_inum(dp->low_ino); out_free_entry: pde_free(dp); return NULL; } static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, const char *name, umode_t mode, nlink_t nlink) { struct proc_dir_entry *ent = NULL; const char *fn; struct qstr qstr; if (xlate_proc_name(name, parent, &fn) != 0) goto out; qstr.name = fn; qstr.len = strlen(fn); if (qstr.len == 0 || qstr.len >= 256) { WARN(1, "name len %u\n", qstr.len); return NULL; } if (qstr.len == 1 && fn[0] == '.') { WARN(1, "name '.'\n"); return NULL; } if (qstr.len == 2 && fn[0] == '.' && fn[1] == '.') { WARN(1, "name '..'\n"); return NULL; } if (*parent == &proc_root && name_to_int(&qstr) != ~0U) { WARN(1, "create '/proc/%s' by hand\n", qstr.name); return NULL; } if (is_empty_pde(*parent)) { WARN(1, "attempt to add to permanently empty directory"); return NULL; } ent = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL); if (!ent) goto out; if (qstr.len + 1 <= SIZEOF_PDE_INLINE_NAME) { ent->name = ent->inline_name; } else { ent->name = kmalloc(qstr.len + 1, GFP_KERNEL); if (!ent->name) { pde_free(ent); return NULL; } } memcpy(ent->name, fn, qstr.len + 1); ent->namelen = qstr.len; ent->mode = mode; ent->nlink = nlink; ent->subdir = RB_ROOT; refcount_set(&ent->refcnt, 1); spin_lock_init(&ent->pde_unload_lock); INIT_LIST_HEAD(&ent->pde_openers); proc_set_user(ent, (*parent)->uid, (*parent)->gid); ent->proc_dops = &proc_misc_dentry_ops; /* Revalidate everything under /proc/${pid}/net */ if ((*parent)->proc_dops == &proc_net_dentry_ops) pde_force_lookup(ent); out: return ent; } struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent, const char *dest) { struct proc_dir_entry *ent; ent = __proc_create(&parent, name, (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); if (ent) { ent->size = strlen(dest); ent->data = kmemdup(dest, ent->size + 1, GFP_KERNEL); if (ent->data) { ent->proc_iops = &proc_link_inode_operations; ent = proc_register(parent, ent); } else { pde_free(ent); ent = NULL; } } return ent; } EXPORT_SYMBOL(proc_symlink); struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data, bool force_lookup) { struct proc_dir_entry *ent; if (mode == 0) mode = S_IRUGO | S_IXUGO; ent = __proc_create(&parent, name, S_IFDIR | mode, 2); if (ent) { ent->data = data; ent->proc_dir_ops = &proc_dir_operations; ent->proc_iops = &proc_dir_inode_operations; if (force_lookup) { pde_force_lookup(ent); } ent = proc_register(parent, ent); } return ent; } EXPORT_SYMBOL_GPL(_proc_mkdir); struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, struct proc_dir_entry *parent, void *data) { return _proc_mkdir(name, mode, parent, data, false); } EXPORT_SYMBOL_GPL(proc_mkdir_data); struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, struct proc_dir_entry *parent) { return 
proc_mkdir_data(name, mode, parent, NULL); } EXPORT_SYMBOL(proc_mkdir_mode); struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) { return proc_mkdir_data(name, 0, parent, NULL); } EXPORT_SYMBOL(proc_mkdir); struct proc_dir_entry *proc_create_mount_point(const char *name) { umode_t mode = S_IFDIR | S_IRUGO | S_IXUGO; struct proc_dir_entry *ent, *parent = NULL; ent = __proc_create(&parent, name, mode, 2); if (ent) { ent->data = NULL; ent->proc_dir_ops = NULL; ent->proc_iops = NULL; ent = proc_register(parent, ent); } return ent; } EXPORT_SYMBOL(proc_create_mount_point); struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, struct proc_dir_entry **parent, void *data) { struct proc_dir_entry *p; if ((mode & S_IFMT) == 0) mode |= S_IFREG; if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO; if (WARN_ON_ONCE(!S_ISREG(mode))) return NULL; p = __proc_create(parent, name, mode, 1); if (p) { p->proc_iops = &proc_file_inode_operations; p->data = data; } return p; } static void pde_set_flags(struct proc_dir_entry *pde) { if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) pde->flags |= PROC_ENTRY_PERMANENT; if (pde->proc_ops->proc_read_iter) pde->flags |= PROC_ENTRY_proc_read_iter; #ifdef CONFIG_COMPAT if (pde->proc_ops->proc_compat_ioctl) pde->flags |= PROC_ENTRY_proc_compat_ioctl; #endif } struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; p->proc_ops = proc_ops; pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_data); struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops) { return proc_create_data(name, mode, parent, proc_ops, NULL); } EXPORT_SYMBOL(proc_create); static int proc_seq_open(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); if (de->state_size) return seq_open_private(file, de->seq_ops, de->state_size); return seq_open(file, de->seq_ops); } static int proc_seq_release(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); if (de->state_size) return seq_release_private(inode, file); return seq_release(inode, file); } static const struct proc_ops proc_seq_ops = { /* not permanent -- can call into arbitrary seq_operations */ .proc_open = proc_seq_open, .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = proc_seq_release, }; struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct seq_operations *ops, unsigned int state_size, void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; p->proc_ops = &proc_seq_ops; p->seq_ops = ops; p->state_size = state_size; pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_seq_private); static int proc_single_open(struct inode *inode, struct file *file) { struct proc_dir_entry *de = PDE(inode); return single_open(file, de->single_show, de->data); } static const struct proc_ops proc_single_ops = { /* not permanent -- can call into arbitrary ->single_show */ .proc_open = proc_single_open, .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = single_release, }; struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, struct proc_dir_entry *parent, int 
(*show)(struct seq_file *, void *), void *data) { struct proc_dir_entry *p; p = proc_create_reg(name, mode, &parent, data); if (!p) return NULL; p->proc_ops = &proc_single_ops; p->single_show = show; pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_single_data); void proc_set_size(struct proc_dir_entry *de, loff_t size) { de->size = size; } EXPORT_SYMBOL(proc_set_size); void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) { de->uid = uid; de->gid = gid; } EXPORT_SYMBOL(proc_set_user); void pde_put(struct proc_dir_entry *pde) { if (refcount_dec_and_test(&pde->refcnt)) { proc_free_inum(pde->low_ino); pde_free(pde); } } /* * Remove a /proc entry and free it if it's not currently in use. */ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) { struct proc_dir_entry *de = NULL; const char *fn = name; unsigned int len; write_lock(&proc_subdir_lock); if (__xlate_proc_name(name, &parent, &fn) != 0) { write_unlock(&proc_subdir_lock); return; } len = strlen(fn); de = pde_subdir_find(parent, fn, len); if (de) { if (unlikely(pde_is_permanent(de))) { WARN(1, "removing permanent /proc entry '%s'", de->name); de = NULL; } else { rb_erase(&de->subdir_node, &parent->subdir); if (S_ISDIR(de->mode)) parent->nlink--; } } write_unlock(&proc_subdir_lock); if (!de) { WARN(1, "name '%s'\n", name); return; } proc_entry_rundown(de); WARN(pde_subdir_first(de), "%s: removing non-empty directory '%s/%s', leaking at least '%s'\n", __func__, de->parent->name, de->name, pde_subdir_first(de)->name); pde_put(de); } EXPORT_SYMBOL(remove_proc_entry); int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { struct proc_dir_entry *root = NULL, *de, *next; const char *fn = name; unsigned int len; write_lock(&proc_subdir_lock); if (__xlate_proc_name(name, &parent, &fn) != 0) { write_unlock(&proc_subdir_lock); return -ENOENT; } len = strlen(fn); root = pde_subdir_find(parent, fn, len); if (!root) { write_unlock(&proc_subdir_lock); return -ENOENT; } if (unlikely(pde_is_permanent(root))) { write_unlock(&proc_subdir_lock); WARN(1, "removing permanent /proc entry '%s/%s'", root->parent->name, root->name); return -EINVAL; } rb_erase(&root->subdir_node, &parent->subdir); de = root; while (1) { next = pde_subdir_first(de); if (next) { if (unlikely(pde_is_permanent(next))) { write_unlock(&proc_subdir_lock); WARN(1, "removing permanent /proc entry '%s/%s'", next->parent->name, next->name); return -EINVAL; } rb_erase(&next->subdir_node, &de->subdir); de = next; continue; } next = de->parent; if (S_ISDIR(de->mode)) next->nlink--; write_unlock(&proc_subdir_lock); proc_entry_rundown(de); if (de == root) break; pde_put(de); write_lock(&proc_subdir_lock); de = next; } pde_put(root); return 0; } EXPORT_SYMBOL(remove_proc_subtree); void *proc_get_parent_data(const struct inode *inode) { struct proc_dir_entry *de = PDE(inode); return de->parent->data; } EXPORT_SYMBOL_GPL(proc_get_parent_data); void proc_remove(struct proc_dir_entry *de) { if (de) remove_proc_subtree(de->name, de->parent); } EXPORT_SYMBOL(proc_remove); /* * Pull a user buffer into memory and pass it to the file's write handler if * one is supplied. The ->write() method is permitted to modify the * kernel-side buffer. 
*/ ssize_t proc_simple_write(struct file *f, const char __user *ubuf, size_t size, loff_t *_pos) { struct proc_dir_entry *pde = PDE(file_inode(f)); char *buf; int ret; if (!pde->write) return -EACCES; if (size == 0 || size > PAGE_SIZE - 1) return -EINVAL; buf = memdup_user_nul(ubuf, size); if (IS_ERR(buf)) return PTR_ERR(buf); ret = pde->write(f, buf, size); kfree(buf); return ret == 0 ? size : ret; }
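/*
 * Editor's note: the snippet below is not part of fs/proc/generic.c.  It is a
 * minimal usage sketch of the interfaces exported above (proc_mkdir(),
 * proc_create_single_data(), remove_proc_subtree()); the "example" names are
 * hypothetical.
 */
#if 0	/* illustrative only */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from /proc/example/status\n");
	return 0;
}

static int __init example_proc_init(void)
{
	struct proc_dir_entry *dir = proc_mkdir("example", NULL);

	if (!dir)
		return -ENOMEM;

	/* Read-only file backed by a single seq_file show() callback. */
	if (!proc_create_single_data("status", 0444, dir, example_show, NULL)) {
		remove_proc_subtree("example", NULL);
		return -ENOMEM;
	}
	return 0;
}
module_init(example_proc_init);

static void __exit example_proc_exit(void)
{
	remove_proc_subtree("example", NULL);
}
module_exit(example_proc_exit);

MODULE_LICENSE("GPL");
#endif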
// SPDX-License-Identifier: GPL-2.0-only /* * RTL8XXXU mac80211 USB driver - 8192fu specific subdriver * * Copyright (c) 2023 Bitterblue Smith <rtl8821cerfe2@gmail.com> * * Portions copied from existing rtl8xxxu code: * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com> * * Portions, notably calibration code: * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/ #include "regs.h" #include "rtl8xxxu.h" static const struct rtl8xxxu_reg8val rtl8192f_mac_init_table[] = { {0x420, 0x00}, {0x422, 0x78}, {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00}, {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05}, {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01}, {0x442, 0x00}, {0x444, 0x10}, {0x445, 0xf0}, {0x446, 0x0e}, {0x447, 0x1f}, {0x448, 0x00}, {0x449, 0x00}, {0x44a, 0x00}, {0x44b, 0x00}, {0x44c, 0x10}, {0x44d, 0xf0}, {0x44e, 0x0e}, {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0x00}, {0x452, 0x00}, {0x453, 0x00}, {0x480, 0x20}, {0x49c, 0x30}, {0x49d, 0xf0}, {0x49e, 0x03}, {0x49f, 0x3e}, {0x4a0, 0x00}, {0x4a1, 0x00}, {0x4a2, 0x00}, {0x4a3, 0x00}, {0x4a4, 0x15}, {0x4a5, 0xf0}, {0x4a6, 0x01}, {0x4a7, 0x0e}, {0x4a8, 0xe0}, {0x4a9, 0x00}, {0x4aa, 0x00}, {0x4ab, 0x00}, {0x2448, 0x06}, {0x244a, 0x06}, {0x244c, 0x06}, {0x244e, 0x06}, {0x4c7, 0x80}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4ca, 0x3c}, {0x4cb, 0x3c}, {0x4cc, 0xff}, {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a}, {0x521, 0x2f}, {0x525, 0x0f}, {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x60c, 0x18}, {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8}, {0x66e, 0x05}, {0x6a0, 0xff}, {0x6a1, 0xff}, {0x6a2, 0xff}, {0x6a3, 0xff}, {0x6a4, 0xff}, {0x6a5, 0xff}, {0x6de, 0x84}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87}, {0x718, 0x40}, {0x7c0, 0x38}, {0x7c2, 0x0f}, {0x7c3, 0xc0}, {0x073, 0x04}, {0x7c4, 0x77}, {0x024, 0xc7}, {0x7ec, 0xff}, {0x7ed, 0xff}, {0x7ee, 0xff}, {0x7ef, 0xff}, {0xffff, 0xff}, }; /* If updating the phy init table, also update rtl8192f_revise_cck_tx_psf(). 
*/ static const struct rtl8xxxu_reg32val rtl8192fu_phy_init_table[] = { {0x800, 0x80006C00}, {0x804, 0x00004001}, {0x808, 0x0000FC00}, {0x80C, 0x00000000}, {0x810, 0x20200322}, {0x814, 0x020C3910}, {0x818, 0x00000385}, {0x81C, 0x07000000}, {0x820, 0x01000100}, {0x824, 0x00390204}, {0x828, 0x01000100}, {0x82C, 0x00390204}, {0x830, 0x25252525}, {0x834, 0x25252525}, {0x838, 0x25252525}, {0x83C, 0x25252525}, {0x840, 0x00010000}, {0x844, 0x00010000}, {0x848, 0x25252525}, {0x84C, 0x25252525}, {0x850, 0x00031FE0}, {0x854, 0x00000000}, {0x858, 0x569A569A}, {0x85C, 0x00400040}, {0x860, 0x66F60000}, {0x864, 0x061F0000}, {0x868, 0x25252525}, {0x86C, 0x25252525}, {0x870, 0x00000300}, {0x874, 0x04003400}, {0x878, 0x08080808}, {0x87C, 0x004F0201}, {0x880, 0xD8001402}, {0x884, 0xC0000120}, {0x888, 0x00000000}, {0x88C, 0xCC0000C0}, {0x890, 0x00000000}, {0x894, 0xFFFFFFFE}, {0x898, 0x40302010}, {0x89C, 0x00706050}, {0x900, 0x00000000}, {0x904, 0x00000023}, {0x908, 0x00000F00}, {0x90C, 0x81121313}, {0x910, 0x024C0000}, {0x914, 0x00000000}, {0x918, 0x00000000}, {0x91C, 0x00000000}, {0x920, 0x00000000}, {0x924, 0x00000000}, {0x928, 0x00000000}, {0x92C, 0x00000000}, {0x930, 0x88000000}, {0x934, 0x00000245}, {0x938, 0x00024588}, {0x93C, 0x00000000}, {0x940, 0x000007FF}, {0x944, 0x3F3F0000}, {0x948, 0x000001A3}, {0x94C, 0x20200008}, {0x950, 0x00338A98}, {0x954, 0x00000000}, {0x958, 0xCBCAD87A}, {0x95C, 0x06EB5735}, {0x960, 0x00000000}, {0x964, 0x00000000}, {0x968, 0x00000000}, {0x96C, 0x00000003}, {0x970, 0x00000000}, {0x974, 0x00000000}, {0x978, 0x00000000}, {0x97C, 0x10030000}, {0x980, 0x00000000}, {0x984, 0x02800280}, {0x988, 0x020A5704}, {0x98C, 0x1461C826}, {0x990, 0x0001469E}, {0x994, 0x008858D1}, {0x998, 0x400086C9}, {0x99C, 0x44444242}, {0x9A0, 0x00000000}, {0x9A4, 0x00000000}, {0x9A8, 0x00000000}, {0x9AC, 0xC0000000}, {0xA00, 0x00D047C8}, {0xA04, 0xC1FF0008}, {0xA08, 0x88838300}, {0xA0C, 0x2E20100F}, {0xA10, 0x9500BB78}, {0xA14, 0x11144028}, {0xA18, 0x00881117}, {0xA1C, 0x89140F00}, {0xA20, 0xE82C0001}, {0xA24, 0x64B80C1C}, {0xA28, 0x00158810}, {0xA2C, 0x10BB8000}, {0xA70, 0x00008000}, {0xA74, 0x80800100}, {0xA78, 0x000089F0}, {0xA7C, 0x225B0606}, {0xA80, 0x20803210}, {0xA84, 0x00200200}, {0xA88, 0x00000000}, {0xA8C, 0x00000000}, {0xA90, 0x00000000}, {0xA94, 0x00000000}, {0xA98, 0x00000000}, {0xA9C, 0x00460000}, {0xAA0, 0x00000000}, {0xAA4, 0x00020014}, {0xAA8, 0xBA0A0008}, {0xAAC, 0x01235667}, {0xAB0, 0x00000000}, {0xAB4, 0x00201402}, {0xAB8, 0x0000001C}, {0xABC, 0x0000F7FF}, {0xAC0, 0xD4C0A742}, {0xAC4, 0x00000000}, {0xAC8, 0x00000F08}, {0xACC, 0x00000F07}, {0xAD0, 0xA1052A10}, {0xAD4, 0x0D9D8452}, {0xAD8, 0x9E024024}, {0xADC, 0x0023C001}, {0xAE0, 0x00000391}, {0xB2C, 0x00000000}, {0xC00, 0x00000080}, {0xC04, 0x6F005433}, {0xC08, 0x000004E4}, {0xC0C, 0x6C6C6C6C}, {0xC10, 0x22000000}, {0xC14, 0x40000100}, {0xC18, 0x22000000}, {0xC1C, 0x40000100}, {0xC20, 0x00000000}, {0xC24, 0x40000100}, {0xC28, 0x00000000}, {0xC2C, 0x40000100}, {0xC30, 0x0401E809}, {0xC34, 0x30000020}, {0xC38, 0x23808080}, {0xC3C, 0x00002F44}, {0xC40, 0x1CF8403F}, {0xC44, 0x000100C7}, {0xC48, 0xEC060106}, {0xC4C, 0x007F037F}, {0xC50, 0x00E48020}, {0xC54, 0x04008017}, {0xC58, 0x00000020}, {0xC5C, 0x00708492}, {0xC60, 0x09280200}, {0xC64, 0x5014838B}, {0xC68, 0x47C006C7}, {0xC6C, 0x00000035}, {0xC70, 0x00001007}, {0xC74, 0x02815269}, {0xC78, 0x0FE07F1F}, {0xC7C, 0x00B91612}, {0xC80, 0x40000100}, {0xC84, 0x32000000}, {0xC88, 0x40000100}, {0xC8C, 0xA0240000}, {0xC90, 0x400E161E}, {0xC94, 0x00000F00}, {0xC98, 0x400E161E}, {0xC9C, 
0x0000BDC8}, {0xCA0, 0x00000000}, {0xCA4, 0x098300A0}, {0xCA8, 0x00006B00}, {0xCAC, 0x87F45B1A}, {0xCB0, 0x0000002D}, {0xCB4, 0x00000000}, {0xCB8, 0x00000000}, {0xCBC, 0x28100200}, {0xCC0, 0x0010A3D0}, {0xCC4, 0x00000F7D}, {0xCC8, 0x00000000}, {0xCCC, 0x00000000}, {0xCD0, 0x593659AD}, {0xCD4, 0xB7545121}, {0xCD8, 0x64B22427}, {0xCDC, 0x00766932}, {0xCE0, 0x40201000}, {0xCE4, 0x00000000}, {0xCE8, 0x40E04407}, {0xCEC, 0x2E572000}, {0xD00, 0x000D8780}, {0xD04, 0x40020403}, {0xD08, 0x0002907F}, {0xD0C, 0x20010201}, {0xD10, 0x06288888}, {0xD14, 0x8888367B}, {0xD18, 0x7D806DB3}, {0xD1C, 0x0000007F}, {0xD20, 0x567600B8}, {0xD24, 0x0000018B}, {0xD28, 0xD513FF7D}, {0xD2C, 0xCC979975}, {0xD30, 0x04928000}, {0xD34, 0x40608000}, {0xD38, 0x88DDA000}, {0xD3C, 0x00026EE2}, {0xD50, 0x67270001}, {0xD54, 0x20500000}, {0xD58, 0x16161616}, {0xD5C, 0x71F20064}, {0xD60, 0x4653DA60}, {0xD64, 0x3E718A3C}, {0xD68, 0x00000183}, {0xD7C, 0x00000000}, {0xD80, 0x50000000}, {0xD84, 0x31310400}, {0xD88, 0xF5B50000}, {0xD8C, 0x00000000}, {0xD90, 0x00000000}, {0xD94, 0x44BBBB44}, {0xD98, 0x44BB44FF}, {0xD9C, 0x06033688}, {0xE00, 0x25252525}, {0xE04, 0x25252525}, {0xE08, 0x25252525}, {0xE10, 0x25252525}, {0xE14, 0x25252525}, {0xE18, 0x25252525}, {0xE1C, 0x25252525}, {0xE20, 0x00000000}, {0xE24, 0x00200000}, {0xE28, 0x00000000}, {0xE2C, 0x00000000}, {0xE30, 0x01007C00}, {0xE34, 0x01004800}, {0xE38, 0x10008C0F}, {0xE3C, 0x3C008C0F}, {0xE40, 0x01007C00}, {0xE44, 0x00000000}, {0xE48, 0x00000000}, {0xE4C, 0x00000000}, {0xE50, 0x01007C00}, {0xE54, 0x01004800}, {0xE58, 0x10008C0F}, {0xE5C, 0x3C008C0F}, {0xE60, 0x02100000}, {0xE64, 0xBBBBBBBB}, {0xE68, 0x40404040}, {0xE6C, 0x80408040}, {0xE70, 0x80408040}, {0xE74, 0x40404040}, {0xE78, 0x00400040}, {0xE7C, 0x40404040}, {0xE80, 0x00FF0000}, {0xE84, 0x80408040}, {0xE88, 0x40404040}, {0xE8C, 0x80408040}, {0xED0, 0x80408040}, {0xED4, 0x80408040}, {0xED8, 0x80408040}, {0xEDC, 0xC040C040}, {0xEE0, 0xC040C040}, {0xEE4, 0x00400040}, {0xEE8, 0xD8001402}, {0xEEC, 0xC0000120}, {0xEF0, 0x02000B09}, {0xEF4, 0x00000001}, {0xEF8, 0x00000000}, {0xF00, 0x00000300}, {0xF04, 0x00000002}, {0xF08, 0x00007D0C}, {0xF0C, 0x0000A907}, {0xF10, 0x00005807}, {0xF14, 0x00000003}, {0xF18, 0x07D003E8}, {0xF1C, 0x8000001F}, {0xF20, 0x00000000}, {0xF24, 0x00000000}, {0xF28, 0x00000000}, {0xF2C, 0x00000000}, {0xF30, 0x00000000}, {0xF34, 0x00000000}, {0xF38, 0x00030055}, {0xF3C, 0x0000003A}, {0xF40, 0x00000002}, {0xF44, 0x00000000}, {0xF48, 0x00000000}, {0xF4C, 0x0B000000}, {0xF50, 0x00000000}, {0xffff, 0xffffffff}, }; static const struct rtl8xxxu_reg32val rtl8192f_agc_table[] = { {0xC78, 0x0FA0001F}, {0xC78, 0x0FA0011F}, {0xC78, 0x0FA0021F}, {0xC78, 0x0FA0031F}, {0xC78, 0x0FA0041F}, {0xC78, 0x0FA0051F}, {0xC78, 0x0F90061F}, {0xC78, 0x0F80071F}, {0xC78, 0x0F70081F}, {0xC78, 0x0F60091F}, {0xC78, 0x0F500A1F}, {0xC78, 0x0F400B1F}, {0xC78, 0x0F300C1F}, {0xC78, 0x0F200D1F}, {0xC78, 0x0F100E1F}, {0xC78, 0x0F000F1F}, {0xC78, 0x0EF0101F}, {0xC78, 0x0EE0111F}, {0xC78, 0x0ED0121F}, {0xC78, 0x0EC0131F}, {0xC78, 0x0EB0141F}, {0xC78, 0x0EA0151F}, {0xC78, 0x0E90161F}, {0xC78, 0x0E80171F}, {0xC78, 0x0E70181F}, {0xC78, 0x0E60191F}, {0xC78, 0x0E501A1F}, {0xC78, 0x0E401B1F}, {0xC78, 0x0E301C1F}, {0xC78, 0x0C701D1F}, {0xC78, 0x0C601E1F}, {0xC78, 0x0C501F1F}, {0xC78, 0x0C40201F}, {0xC78, 0x0C30211F}, {0xC78, 0x0A60221F}, {0xC78, 0x0A50231F}, {0xC78, 0x0A40241F}, {0xC78, 0x0A30251F}, {0xC78, 0x0860261F}, {0xC78, 0x0850271F}, {0xC78, 0x0840281F}, {0xC78, 0x0830291F}, {0xC78, 0x06702A1F}, {0xC78, 0x06602B1F}, {0xC78, 0x06502C1F}, {0xC78, 
0x06402D1F}, {0xC78, 0x06302E1F}, {0xC78, 0x04602F1F}, {0xC78, 0x0450301F}, {0xC78, 0x0440311F}, {0xC78, 0x0430321F}, {0xC78, 0x0260331F}, {0xC78, 0x0250341F}, {0xC78, 0x0240351F}, {0xC78, 0x0230361F}, {0xC78, 0x0050371F}, {0xC78, 0x0040381F}, {0xC78, 0x0030391F}, {0xC78, 0x00203A1F}, {0xC78, 0x00103B1F}, {0xC78, 0x00003C1F}, {0xC78, 0x00003D1F}, {0xC78, 0x00003E1F}, {0xC78, 0x00003F1F}, {0xC78, 0x0FA0401F}, {0xC78, 0x0FA0411F}, {0xC78, 0x0FA0421F}, {0xC78, 0x0FA0431F}, {0xC78, 0x0F90441F}, {0xC78, 0x0F80451F}, {0xC78, 0x0F70461F}, {0xC78, 0x0F60471F}, {0xC78, 0x0F50481F}, {0xC78, 0x0F40491F}, {0xC78, 0x0F304A1F}, {0xC78, 0x0F204B1F}, {0xC78, 0x0F104C1F}, {0xC78, 0x0F004D1F}, {0xC78, 0x0EF04E1F}, {0xC78, 0x0EE04F1F}, {0xC78, 0x0ED0501F}, {0xC78, 0x0EC0511F}, {0xC78, 0x0EB0521F}, {0xC78, 0x0EA0531F}, {0xC78, 0x0E90541F}, {0xC78, 0x0E80551F}, {0xC78, 0x0E70561F}, {0xC78, 0x0E60571F}, {0xC78, 0x0E50581F}, {0xC78, 0x0E40591F}, {0xC78, 0x0E305A1F}, {0xC78, 0x0E205B1F}, {0xC78, 0x0E105C1F}, {0xC78, 0x0C505D1F}, {0xC78, 0x0C405E1F}, {0xC78, 0x0C305F1F}, {0xC78, 0x0C20601F}, {0xC78, 0x0C10611F}, {0xC78, 0x0A40621F}, {0xC78, 0x0A30631F}, {0xC78, 0x0A20641F}, {0xC78, 0x0A10651F}, {0xC78, 0x0840661F}, {0xC78, 0x0830671F}, {0xC78, 0x0820681F}, {0xC78, 0x0810691F}, {0xC78, 0x06506A1F}, {0xC78, 0x06406B1F}, {0xC78, 0x06306C1F}, {0xC78, 0x06206D1F}, {0xC78, 0x06106E1F}, {0xC78, 0x04406F1F}, {0xC78, 0x0430701F}, {0xC78, 0x0420711F}, {0xC78, 0x0410721F}, {0xC78, 0x0240731F}, {0xC78, 0x0230741F}, {0xC78, 0x0220751F}, {0xC78, 0x0210761F}, {0xC78, 0x0030771F}, {0xC78, 0x0020781F}, {0xC78, 0x0010791F}, {0xC78, 0x00007A1F}, {0xC78, 0x00007B1F}, {0xC78, 0x00007C1F}, {0xC78, 0x00007D1F}, {0xC78, 0x00007E1F}, {0xC78, 0x00007F1F}, {0xC78, 0x0FA0801F}, {0xC78, 0x0FA0811F}, {0xC78, 0x0FA0821F}, {0xC78, 0x0FA0831F}, {0xC78, 0x0FA0841F}, {0xC78, 0x0FA0851F}, {0xC78, 0x0F90861F}, {0xC78, 0x0F80871F}, {0xC78, 0x0F70881F}, {0xC78, 0x0F60891F}, {0xC78, 0x0F508A1F}, {0xC78, 0x0F408B1F}, {0xC78, 0x0F308C1F}, {0xC78, 0x0F208D1F}, {0xC78, 0x0F108E1F}, {0xC78, 0x0B908F1F}, {0xC78, 0x0B80901F}, {0xC78, 0x0B70911F}, {0xC78, 0x0B60921F}, {0xC78, 0x0B50931F}, {0xC78, 0x0B40941F}, {0xC78, 0x0B30951F}, {0xC78, 0x0B20961F}, {0xC78, 0x0B10971F}, {0xC78, 0x0B00981F}, {0xC78, 0x0AF0991F}, {0xC78, 0x0AE09A1F}, {0xC78, 0x0AD09B1F}, {0xC78, 0x0AC09C1F}, {0xC78, 0x0AB09D1F}, {0xC78, 0x0AA09E1F}, {0xC78, 0x0A909F1F}, {0xC78, 0x0A80A01F}, {0xC78, 0x0A70A11F}, {0xC78, 0x0A60A21F}, {0xC78, 0x0A50A31F}, {0xC78, 0x0A40A41F}, {0xC78, 0x0A30A51F}, {0xC78, 0x0A20A61F}, {0xC78, 0x0A10A71F}, {0xC78, 0x0A00A81F}, {0xC78, 0x0830A91F}, {0xC78, 0x0820AA1F}, {0xC78, 0x0810AB1F}, {0xC78, 0x0800AC1F}, {0xC78, 0x0640AD1F}, {0xC78, 0x0630AE1F}, {0xC78, 0x0620AF1F}, {0xC78, 0x0610B01F}, {0xC78, 0x0600B11F}, {0xC78, 0x0430B21F}, {0xC78, 0x0420B31F}, {0xC78, 0x0410B41F}, {0xC78, 0x0400B51F}, {0xC78, 0x0230B61F}, {0xC78, 0x0220B71F}, {0xC78, 0x0210B81F}, {0xC78, 0x0200B91F}, {0xC78, 0x0000BA1F}, {0xC78, 0x0000BB1F}, {0xC78, 0x0000BC1F}, {0xC78, 0x0000BD1F}, {0xC78, 0x0000BE1F}, {0xC78, 0x0000BF1F}, {0xC50, 0x00E48024}, {0xC50, 0x00E48020}, {0xffff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8192fu_radioa_init_table[] = { {0x00, 0x30000}, {0x18, 0x0FC07}, {0x81, 0x0FC00}, {0x82, 0x003C0}, {0x84, 0x00005}, {0x86, 0xA33A5}, {0x87, 0x00000}, {0x88, 0x58010}, {0x8E, 0x64540}, {0x8F, 0x282D8}, {0x51, 0x02C06}, {0x52, 0x7A007}, {0x53, 0x10061}, {0x54, 0x60018}, {0x55, 0x82020}, {0x56, 0x08CC6}, {0x57, 0x2CC00}, {0x58, 0x00000}, {0x5A, 0x50000}, {0x5B, 
0x00006}, {0x5C, 0x00015}, {0x65, 0x20000}, {0x6E, 0x38319}, {0xF5, 0x43180}, {0xEF, 0x00002}, {0x33, 0x00301}, {0x33, 0x1032A}, {0x33, 0x2032A}, {0xEF, 0x00000}, {0xDF, 0x00002}, {0x35, 0x00000}, {0xF0, 0x08008}, {0xEF, 0x00800}, {0x33, 0x0040E}, {0x33, 0x04845}, {0x33, 0x08848}, {0x33, 0x0C84B}, {0x33, 0x1088A}, {0x33, 0x14C50}, {0x33, 0x18C8E}, {0x33, 0x1CCCD}, {0x33, 0x20CD0}, {0x33, 0x24CD3}, {0x33, 0x28CD6}, {0x33, 0x4002B}, {0x33, 0x4402E}, {0x33, 0x48846}, {0x33, 0x4C849}, {0x33, 0x50888}, {0x33, 0x54CC6}, {0x33, 0x58CC9}, {0x33, 0x5CCCC}, {0x33, 0x60CCF}, {0x33, 0x64CD2}, {0x33, 0x68CD5}, {0xEF, 0x00000}, {0xEF, 0x00400}, {0x33, 0x01C23}, {0x33, 0x05C23}, {0x33, 0x09D23}, {0x33, 0x0DD23}, {0x33, 0x11FA3}, {0x33, 0x15FA3}, {0x33, 0x19FAB}, {0x33, 0x1DFAB}, {0xEF, 0x00000}, {0xEF, 0x00200}, {0x33, 0x00030}, {0x33, 0x04030}, {0x33, 0x08030}, {0x33, 0x0C030}, {0x33, 0x10030}, {0x33, 0x14030}, {0x33, 0x18030}, {0x33, 0x1C030}, {0x33, 0x20030}, {0x33, 0x24030}, {0x33, 0x28030}, {0x33, 0x2C030}, {0x33, 0x30030}, {0x33, 0x34030}, {0x33, 0x38030}, {0x33, 0x3C030}, {0xEF, 0x00000}, {0xEF, 0x00100}, {0x33, 0x44001}, {0x33, 0x48001}, {0x33, 0x4C001}, {0x33, 0x50001}, {0x33, 0x54001}, {0x33, 0x58001}, {0x33, 0x5C001}, {0x33, 0x60001}, {0x33, 0x64001}, {0x33, 0x68001}, {0x33, 0x6C001}, {0x33, 0x70001}, {0x33, 0x74001}, {0x33, 0x78001}, {0x33, 0x04000}, {0x33, 0x08000}, {0x33, 0x0C000}, {0x33, 0x10000}, {0x33, 0x14000}, {0x33, 0x18001}, {0x33, 0x1C002}, {0x33, 0x20002}, {0x33, 0x24002}, {0x33, 0x28002}, {0x33, 0x2C002}, {0x33, 0x30002}, {0x33, 0x34002}, {0x33, 0x38002}, {0xEF, 0x00000}, {0x84, 0x00000}, {0xEF, 0x80010}, {0x30, 0x20000}, {0x31, 0x0006F}, {0x32, 0x01FF7}, {0xEF, 0x00000}, {0x84, 0x00000}, {0xEF, 0x80000}, {0x30, 0x30000}, {0x31, 0x0006F}, {0x32, 0xF1DF3}, {0xEF, 0x00000}, {0x84, 0x00000}, {0xEF, 0x80000}, {0x30, 0x38000}, {0x31, 0x0006F}, {0x32, 0xF1FF2}, {0xEF, 0x00000}, {0x1B, 0x746CE}, {0xEF, 0x20000}, {0x33, 0x30000}, {0x33, 0x38000}, {0x33, 0x70000}, {0x33, 0x78000}, {0xEF, 0x00000}, {0xDF, 0x08000}, {0xB0, 0xFFBCB}, {0xB3, 0x06000}, {0xB7, 0x18DF0}, {0xB8, 0x38FF0}, {0xC9, 0x00600}, {0xDF, 0x00000}, {0xB1, 0x33B8F}, {0xB2, 0x33762}, {0xB4, 0x141F0}, {0xB5, 0x14080}, {0xB6, 0x12425}, {0xB9, 0xC0008}, {0xBA, 0x40005}, {0xC2, 0x02C01}, {0xC3, 0x0000B}, {0xC4, 0x81E2F}, {0xC5, 0x5C28F}, {0xC6, 0x000A0}, {0xCA, 0x02000}, {0xFE, 0x00000}, {0x18, 0x08C07}, {0xFE, 0x00000}, {0xFE, 0x00000}, {0xFE, 0x00000}, {0x00, 0x31DD5}, {0xff, 0xffffffff} }; static const struct rtl8xxxu_rfregval rtl8192fu_radiob_init_table[] = { {0x00, 0x30000}, {0x81, 0x0FC00}, {0x82, 0x003C0}, {0x84, 0x00005}, {0x86, 0xA33A5}, {0x87, 0x00000}, {0x88, 0x58010}, {0x8E, 0x64540}, {0x8F, 0x282D8}, {0x51, 0x02C06}, {0x52, 0x7A007}, {0x53, 0x10061}, {0x54, 0x60018}, {0x55, 0x82020}, {0x56, 0x08CC6}, {0x57, 0x2CC00}, {0x58, 0x00000}, {0x5A, 0x50000}, {0x5B, 0x00006}, {0x5C, 0x00015}, {0x65, 0x20000}, {0x6E, 0x38319}, {0xF5, 0x43180}, {0xEF, 0x00002}, {0x33, 0x00301}, {0x33, 0x1032A}, {0x33, 0x2032A}, {0xEF, 0x00000}, {0xDF, 0x00002}, {0x35, 0x00000}, {0xF0, 0x08008}, {0xEF, 0x00800}, {0x33, 0x0040E}, {0x33, 0x04845}, {0x33, 0x08848}, {0x33, 0x0C84B}, {0x33, 0x1088A}, {0x33, 0x14CC8}, {0x33, 0x18CCB}, {0x33, 0x1CCCE}, {0x33, 0x20CD1}, {0x33, 0x24CD4}, {0x33, 0x28CD7}, {0x33, 0x4002B}, {0x33, 0x4402E}, {0x33, 0x48846}, {0x33, 0x4C849}, {0x33, 0x50888}, {0x33, 0x54CC6}, {0x33, 0x58CC9}, {0x33, 0x5CCCC}, {0x33, 0x60CCF}, {0x33, 0x64CD2}, {0x33, 0x68CD5}, {0xEF, 0x00000}, {0xEF, 0x00400}, {0x33, 0x01D23}, {0x33, 0x05D23}, 
{0x33, 0x09FA3}, {0x33, 0x0DFA3}, {0x33, 0x11D2B}, {0x33, 0x15D2B}, {0x33, 0x19FAB}, {0x33, 0x1DFAB}, {0xEF, 0x00000}, {0xEF, 0x00200}, {0x33, 0x00030}, {0x33, 0x04030}, {0x33, 0x08030}, {0x33, 0x0C030}, {0x33, 0x10030}, {0x33, 0x14030}, {0x33, 0x18030}, {0x33, 0x1C030}, {0x33, 0x20030}, {0x33, 0x24030}, {0x33, 0x28030}, {0x33, 0x2C030}, {0x33, 0x30030}, {0x33, 0x34030}, {0x33, 0x38030}, {0x33, 0x3C030}, {0xEF, 0x00000}, {0xEF, 0x00100}, {0x33, 0x44000}, {0x33, 0x48000}, {0x33, 0x4C000}, {0x33, 0x50000}, {0x33, 0x54000}, {0x33, 0x58000}, {0x33, 0x5C000}, {0x33, 0x60000}, {0x33, 0x64000}, {0x33, 0x68000}, {0x33, 0x6C000}, {0x33, 0x70000}, {0x33, 0x74000}, {0x33, 0x78000}, {0x33, 0x04000}, {0x33, 0x08000}, {0x33, 0x0C000}, {0x33, 0x10000}, {0x33, 0x14000}, {0x33, 0x18000}, {0x33, 0x1C001}, {0x33, 0x20001}, {0x33, 0x24001}, {0x33, 0x28001}, {0x33, 0x2C001}, {0x33, 0x30001}, {0x33, 0x34001}, {0x33, 0x38001}, {0xEF, 0x00000}, {0x84, 0x00000}, {0xEF, 0x80010}, {0x30, 0x20000}, {0x31, 0x0006F}, {0x32, 0x01FF7}, {0xEF, 0x00000}, {0x84, 0x00000}, {0xEF, 0x80000}, {0x30, 0x30000}, {0x31, 0x0006F}, {0x32, 0xF1DF3}, {0xEF, 0x00000}, {0x84, 0x00000}, {0xEF, 0x80000}, {0x30, 0x38000}, {0x31, 0x0006F}, {0x32, 0xF1FF2}, {0xEF, 0x00000}, {0x1B, 0x746CE}, {0xEF, 0x20000}, {0x33, 0x30000}, {0x33, 0x38000}, {0x33, 0x70000}, {0x33, 0x78000}, {0xEF, 0x00000}, {0x00, 0x31DD5}, {0xff, 0xffffffff} }; static int rtl8192fu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u32 sys_cfg, vendor, val32; strscpy(priv->chip_name, "8192FU", sizeof(priv->chip_name)); priv->rtl_chip = RTL8192F; priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 2; sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK); if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); return -EOPNOTSUPP; } val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL); priv->has_wifi = u32_get_bits(val32, MULTI_WIFI_FUNC_EN); priv->has_bluetooth = u32_get_bits(val32, MULTI_BT_FUNC_EN); priv->has_gps = u32_get_bits(val32, MULTI_GPS_FUNC_EN); priv->is_multi_func = 1; vendor = sys_cfg & SYS_CFG_VENDOR_ID; rtl8xxxu_identify_vendor_1bit(priv, vendor); val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS); priv->rom_rev = u32_get_bits(val32, GPIO_RF_RL_ID); return rtl8xxxu_config_endpoints_no_sie(priv); } static void rtl8192f_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { u8 cck, ofdmbase, mcsbase; u32 val32, ofdm, mcs; int group, cck_group; rtl8188f_channel_to_group(channel, &group, &cck_group); cck = priv->cck_tx_power_index_A[cck_group]; rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_CCK1_MCS32, 0x00007f00, cck); val32 = (cck << 16) | (cck << 8) | cck; rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, 0x7f7f7f00, val32); ofdmbase = priv->ht40_1s_tx_power_index_A[group]; ofdmbase += priv->ofdm_tx_power_diff[RF_A].a; ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_RATE18_06, 0x7f7f7f7f, ofdm); rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_RATE54_24, 0x7f7f7f7f, ofdm); mcsbase = priv->ht40_1s_tx_power_index_A[group]; if (ht40) mcsbase += priv->ht40_tx_power_diff[RF_A].a; else mcsbase += priv->ht20_tx_power_diff[RF_A].a; mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_MCS03_MCS00, 0x7f7f7f7f, mcs); rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_MCS07_MCS04, 0x7f7f7f7f, mcs); rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_MCS11_MCS08, 
0x7f7f7f7f, mcs); rtl8xxxu_write32_mask(priv, REG_TX_AGC_A_MCS15_MCS12, 0x7f7f7f7f, mcs); if (priv->tx_paths == 1) return; cck = priv->cck_tx_power_index_B[cck_group]; val32 = (cck << 16) | (cck << 8) | cck; rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_CCK1_55_MCS32, 0x7f7f7f00, val32); rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, 0x0000007f, cck); ofdmbase = priv->ht40_1s_tx_power_index_B[group]; ofdmbase += priv->ofdm_tx_power_diff[RF_B].b; ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_RATE18_06, 0x7f7f7f7f, ofdm); rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_RATE54_24, 0x7f7f7f7f, ofdm); mcsbase = priv->ht40_1s_tx_power_index_B[group]; if (ht40) mcsbase += priv->ht40_tx_power_diff[RF_B].b; else mcsbase += priv->ht20_tx_power_diff[RF_B].b; mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_MCS03_MCS00, 0x7f7f7f7f, mcs); rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_MCS07_MCS04, 0x7f7f7f7f, mcs); rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_MCS11_MCS08, 0x7f7f7f7f, mcs); rtl8xxxu_write32_mask(priv, REG_TX_AGC_B_MCS15_MCS12, 0x7f7f7f7f, mcs); } static void rtl8192f_revise_cck_tx_psf(struct rtl8xxxu_priv *priv, u8 channel) { if (channel == 13) { /* Special value for channel 13 */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER1, 0xf8fe0001); /* Normal values */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER2, 0x64B80C1C); rtl8xxxu_write16(priv, REG_CCK0_DEBUG_PORT, 0x8810); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER3, 0x01235667); } else if (channel == 14) { /* Normal value */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER1, 0xE82C0001); /* Special values for channel 14 */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER2, 0x0000B81C); rtl8xxxu_write16(priv, REG_CCK0_DEBUG_PORT, 0x0000); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER3, 0x00003667); } else { /* Restore normal values from the phy init table */ rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER1, 0xE82C0001); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER2, 0x64B80C1C); rtl8xxxu_write16(priv, REG_CCK0_DEBUG_PORT, 0x8810); rtl8xxxu_write32(priv, REG_CCK0_TX_FILTER3, 0x01235667); } } static void rtl8192fu_config_kfree(struct rtl8xxxu_priv *priv, u8 channel) { u8 bb_gain[3] = { EFUSE_UNDEFINED, EFUSE_UNDEFINED, EFUSE_UNDEFINED }; u8 bb_gain_path_mask[2] = { 0x0f, 0xf0 }; enum rtl8xxxu_rfpath rfpath; u8 bb_gain_for_path; u8 channel_idx = 0; if (channel >= 1 && channel <= 3) channel_idx = 0; if (channel >= 4 && channel <= 9) channel_idx = 1; if (channel >= 10 && channel <= 14) channel_idx = 2; rtl8xxxu_read_efuse8(priv, 0x1ee, &bb_gain[1]); rtl8xxxu_read_efuse8(priv, 0x1ec, &bb_gain[0]); rtl8xxxu_read_efuse8(priv, 0x1ea, &bb_gain[2]); if (bb_gain[1] == EFUSE_UNDEFINED) return; if (bb_gain[0] == EFUSE_UNDEFINED) bb_gain[0] = bb_gain[1]; if (bb_gain[2] == EFUSE_UNDEFINED) bb_gain[2] = bb_gain[1]; for (rfpath = RF_A; rfpath < priv->rf_paths; rfpath++) { /* power_trim based on 55[19:14] */ rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_UNKNOWN_55, BIT(5), 1); /* enable 55[14] for 0.5db step */ rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_GAIN_CTRL, BIT(18), 1); /* enter power_trim debug mode */ rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_GAIN_CCA, BIT(7), 1); /* write enable */ rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_WE_LUT, BIT(7), 1); bb_gain_for_path = (bb_gain[channel_idx] & bb_gain_path_mask[rfpath]); bb_gain_for_path >>= __ffs(bb_gain_path_mask[rfpath]); rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_TXPA_G3, 0x70000, channel_idx * 
2); rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_TXPA_G3, 0x3f, bb_gain_for_path); rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_TXPA_G3, 0x70000, channel_idx * 2 + 1); rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_TXPA_G3, 0x3f, bb_gain_for_path); /* leave power_trim debug mode */ rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_GAIN_CCA, BIT(7), 0); /* write disable */ rtl8xxxu_write_rfreg_mask(priv, rfpath, RF6052_REG_WE_LUT, BIT(7), 0); } } static void rtl8192fu_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; bool ht40 = conf_is_ht40(&hw->conf); u8 channel, subchannel = 0; bool sec_ch_above = 0; u32 val32; channel = (u8)hw->conf.chandef.chan->hw_value; if (conf_is_ht40_plus(&hw->conf)) { sec_ch_above = 1; channel += 2; subchannel = 2; } else if (conf_is_ht40_minus(&hw->conf)) { sec_ch_above = 0; channel -= 2; subchannel = 1; } val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); rtl8192f_revise_cck_tx_psf(priv, channel); /* Set channel */ val32 &= ~(BIT(18) | BIT(17)); /* select the 2.4G band(?) */ u32p_replace_bits(&val32, channel, 0xff); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); if (priv->rf_paths > 1) rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_MODE_AG, val32); rtl8192fu_config_kfree(priv, channel); rtl8xxxu_write8(priv, REG_DATA_SUBCHANNEL, subchannel); /* small BW */ rtl8xxxu_write32_clear(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, GENMASK(31, 30)); rtl8xxxu_write32_mask(priv, REG_FPGA0_RF_MODE, FPGA_RF_MODE, ht40); rtl8xxxu_write32_mask(priv, REG_FPGA1_RF_MODE, FPGA_RF_MODE, ht40); /* ADC clock = 160M */ rtl8xxxu_write32_mask(priv, REG_FPGA0_RF_MODE, GENMASK(10, 8), 4); /* DAC clock = 80M */ rtl8xxxu_write32_mask(priv, REG_FPGA0_RF_MODE, BIT(13) | BIT(12), 2); /* ADC buffer clk */ rtl8xxxu_write32_mask(priv, REG_ANTDIV_PARA1, BIT(27) | BIT(26), 2); if (ht40) /* Set Control channel to upper or lower. 
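 * CCK0_SIDEBAND is written with !sec_ch_above below: 0 for HT40+ (the
 * control channel is the lower half of the 40 MHz pair), 1 for HT40-
 * (the control channel is the upper half).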
*/ rtl8xxxu_write32_mask(priv, REG_CCK0_SYSTEM, CCK0_SIDEBAND, !sec_ch_above); /* Enable CCK */ rtl8xxxu_write32_set(priv, REG_FPGA0_RF_MODE, FPGA_RF_MODE_CCK); /* RF TRX_BW */ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); val32 &= ~MODE_AG_BW_MASK; if (ht40) val32 |= MODE_AG_BW_40MHZ_8723B; else val32 |= MODE_AG_BW_20MHZ_8723B; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); if (priv->rf_paths > 1) rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_MODE_AG, val32); /* Modify RX DFIR parameters */ rtl8xxxu_write32_mask(priv, REG_TAP_UPD_97F, BIT(21) | BIT(20), 2); rtl8xxxu_write32_mask(priv, REG_DOWNSAM_FACTOR, BIT(29) | BIT(28), 2); if (ht40) val32 = 0x3; else val32 = 0x1a3; rtl8xxxu_write32_mask(priv, REG_RX_DFIR_MOD_97F, 0x1ff, val32); } static void rtl8192fu_init_aggregation(struct rtl8xxxu_priv *priv) { u32 agg_rx; u8 agg_ctrl; /* RX aggregation */ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH); agg_rx &= ~RXDMA_USB_AGG_ENABLE; agg_rx &= ~0xFF0F; /* reset agg size and timeout */ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); } static int rtl8192fu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8192fu_efuse *efuse = &priv->efuse_wifi.efuse8192fu; int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, sizeof(efuse->tx_power_index_A.cck_base)); memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base, sizeof(efuse->tx_power_index_B.cck_base)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); memcpy(priv->ht40_1s_tx_power_index_B, efuse->tx_power_index_B.ht40_base, sizeof(efuse->tx_power_index_B.ht40_base)); priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; priv->ht20_tx_power_diff[0].b = efuse->tx_power_index_B.ht20_ofdm_1s_diff.b; priv->ht40_tx_power_diff[0].a = 0; priv->ht40_tx_power_diff[0].b = 0; for (i = 1; i < RTL8723B_TX_COUNT; i++) { priv->ofdm_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ofdm; priv->ofdm_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ofdm; priv->ht20_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ht20; priv->ht20_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ht20; priv->ht40_tx_power_diff[i].a = efuse->tx_power_index_A.pwr_diff[i - 1].ht40; priv->ht40_tx_power_diff[i].b = efuse->tx_power_index_B.pwr_diff[i - 1].ht40; } priv->default_crystal_cap = efuse->xtal_k & 0x3f; priv->rfe_type = efuse->rfe_option & 0x1f; if (priv->rfe_type != 5 && priv->rfe_type != 1) dev_warn(&priv->udev->dev, "%s: RFE type %d was not tested. Please send an email to linux-wireless@vger.kernel.org about this.\n", __func__, priv->rfe_type); return 0; } static int rtl8192fu_load_firmware(struct rtl8xxxu_priv *priv) { return rtl8xxxu_load_firmware(priv, "rtlwifi/rtl8192fufw.bin"); } static void rtl8192fu_init_phy_bb(struct rtl8xxxu_priv *priv) { /* Enable BB and RF */ rtl8xxxu_write16_set(priv, REG_SYS_FUNC, SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN); rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB); /* To Fix MAC loopback mode fail. 
*/ rtl8xxxu_write8(priv, REG_LDOHCI12_CTRL, 0xf); rtl8xxxu_write8(priv, REG_SYS_SWR_CTRL2 + 1, 0xe9); rtl8xxxu_init_phy_regs(priv, rtl8192fu_phy_init_table); rtl8xxxu_init_phy_regs(priv, rtl8192f_agc_table); } static int rtl8192fu_init_phy_rf(struct rtl8xxxu_priv *priv) { int ret; ret = rtl8xxxu_init_phy_rf(priv, rtl8192fu_radioa_init_table, RF_A); if (ret) return ret; return rtl8xxxu_init_phy_rf(priv, rtl8192fu_radiob_init_table, RF_B); } static void rtl8192f_phy_lc_calibrate(struct rtl8xxxu_priv *priv) { u32 backup_mask = BIT(31) | BIT(30); u32 backup; u32 val32; /* Aries's NarrowBand */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); backup = u32_get_bits(val32, backup_mask); u32p_replace_bits(&val32, 0, backup_mask); rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); rtl8188f_phy_lc_calibrate(priv); /* Aries's NarrowBand */ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); u32p_replace_bits(&val32, backup, backup_mask); rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); /* reset OFDM state */ rtl8xxxu_write32_clear(priv, REG_FPGA0_RF_MODE, FPGA_RF_MODE_OFDM); rtl8xxxu_write32_set(priv, REG_FPGA0_RF_MODE, FPGA_RF_MODE_OFDM); } static int rtl8192fu_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_eac, reg_e94, reg_e9c, val32; u32 rf_0x58_i, rf_0x58_q; u8 rfe = priv->rfe_type; int result = 0; int ktime, i; /* Leave IQK mode */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0); rtl8xxxu_write32(priv, REG_ANAPWR1, 0x44ffbb44); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00400040); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x6f005403); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000804e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x04203400); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(4), 1); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(11), 1); if (rfe == 7 || rfe == 8 || rfe == 9 || rfe == 12) val32 = 0x30; else val32 = 0xe9; rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_PAD_TXG, 0x003ff, val32); rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x808000); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x8214000f); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28140000); rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00e62911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa005800); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8005800); mdelay(15); ktime = 0; while (rtl8xxxu_read32(priv, REG_IQK_RPT_TXA) == 0 && ktime < 21) { mdelay(5); ktime += 5; } /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); /* reload 0xdf and CCK_IND off */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_WE_LUT, BIT(4), 1); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_TXMOD); rf_0x58_i = u32_get_bits(val32, 0xfc000); rf_0x58_q = u32_get_bits(val32, 0x003f0); for (i = 0; i < 8; i++) { 
rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_TXPA_G3, 0x1c000, i); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_TXPA_G3, 0x00fc0, rf_0x58_i); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_TXPA_G3, 0x0003f, rf_0x58_q); } rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_AC, BIT(14), 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_WE_LUT, BIT(4), 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, 0x00810, 0); if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000)) result |= 0x01; return result; } static int rtl8192fu_rx_iqk_path_a(struct rtl8xxxu_priv *priv) { u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32; int result = 0; int ktime; /* Leave IQK mode */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); /* PA/PAD control by 0x56, and set = 0x0 */ rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(1), 1); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_P1, 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(11), 1); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_PAD_TXG, 0x003ff, 0x27); /* Enter IQK mode */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x808000); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160027); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000); /* Tx IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0086a911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa005800); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8005800); mdelay(15); ktime = 0; while (rtl8xxxu_read32(priv, REG_IQK_RPT_TXA) == 0 && ktime < 21) { mdelay(5); ktime += 5; } /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); if (!(reg_eac & BIT(28)) && ((reg_e94 & 0x03ff0000) != 0x01420000) && ((reg_e9c & 0x03ff0000) != 0x00420000)) { result |= 0x01; } else { /* If TX not OK, ignore RX */ /* PA/PAD controlled by 0x0 */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(11), 0); return result; } val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) | ((reg_e9c & 0x3ff0000) >> 16); rtl8xxxu_write32(priv, REG_TX_IQK, val32); /* Modify RX IQK mode table */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); /* PA/PAD control by 0x56, and set = 0x0 */ rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(1), 1); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_P1, 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(11), 1); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_PAD_TXG, 0x003ff, 0x1e0); rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0); rtl8xxxu_write32(priv, REG_ANAPWR1, 0x44ffbb44); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00400040); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x6f005403); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000804e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x04203400); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100); /* Enter IQK mode */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 
0xffffff00, 0x808000); /* path-A IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82170000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28170000); /* RX IQK setting */ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa005800); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8005800); mdelay(15); ktime = 0; while (rtl8xxxu_read32(priv, REG_IQK_RPT_RXA) == 0 && ktime < 21) { mdelay(5); ktime += 5; } /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); /* Leave IQK mode */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_A, RF6052_REG_GAIN_CCA, BIT(11), 0); rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_P1, 0x02000); if (!(reg_eac & BIT(27)) && ((reg_ea4 & 0x03ff0000) != 0x01320000) && ((reg_eac & 0x03ff0000) != 0x00360000)) result |= 0x02; return result; } static int rtl8192fu_iqk_path_b(struct rtl8xxxu_priv *priv) { u32 reg_eac, reg_eb4, reg_ebc, val32; u32 rf_0x58_i, rf_0x58_q; u8 rfe = priv->rfe_type; int result = 0; int ktime, i; /* PA/PAD controlled by 0x0 */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0); rtl8xxxu_write32(priv, REG_ANAPWR1, 0x44ffbb44); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00400040); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x6f005403); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000804e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x04203400); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000000); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(4), 1); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(11), 1); if (rfe == 7 || rfe == 8 || rfe == 9 || rfe == 12) rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_PAD_TXG, 0x003ff, 0x30); else rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_PAD_TXG, 0x00fff, 0xe9); rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x808000); /* Path B IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x8214000F); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28140000); rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00e62911); /* One shot, path B LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa005800); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8005800); mdelay(15); ktime = 0; while (rtl8xxxu_read32(priv, REG_IQK_RPT_TXB) == 0 && ktime < 21) { mdelay(5); ktime += 5; } /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); /* reload 0xdf and CCK_IND off */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_WE_LUT, BIT(4), 1); val32 = rtl8xxxu_read_rfreg(priv, RF_B, 
RF6052_REG_TXMOD); rf_0x58_i = u32_get_bits(val32, 0xfc000); rf_0x58_q = u32_get_bits(val32, 0x003f0); for (i = 0; i < 8; i++) { rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_TXPA_G3, 0x1c000, i); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_TXPA_G3, 0x00fc0, rf_0x58_i); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_TXPA_G3, 0x0003f, rf_0x58_q); } rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_AC, BIT(14), 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_WE_LUT, BIT(4), 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, 0x00810, 0); if (!(reg_eac & BIT(31)) && ((reg_eb4 & 0x03ff0000) != 0x01420000) && ((reg_ebc & 0x03ff0000) != 0x00420000)) result |= 0x01; else dev_warn(&priv->udev->dev, "%s: Path B IQK failed!\n", __func__); return result; } static int rtl8192fu_rx_iqk_path_b(struct rtl8xxxu_priv *priv) { u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, val32; int result = 0; int ktime; /* Leave IQK mode */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(1), 1); rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_GAIN_P1, 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(11), 1); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_PAD_TXG, 0x003ff, 0x67); rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0); rtl8xxxu_write32(priv, REG_ANAPWR1, 0x44ffbb44); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00400040); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x6f005403); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000804e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x04203400); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000000); rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x808000); /* path-B IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82160027); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28160000); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0086a911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa005800); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8005800); mdelay(15); ktime = 0; while (rtl8xxxu_read32(priv, REG_IQK_RPT_TXB) == 0 && ktime < 21) { mdelay(5); ktime += 5; } /* Check failed */ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); if (!(reg_eac & BIT(31)) && ((reg_eb4 & 0x03ff0000) != 0x01420000) && ((reg_ebc & 0x03ff0000) != 0x00420000)) { result |= 0x01; } else { /* PA/PAD controlled by 0x0 */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(11), 0); return result; } val32 = 0x80007c00 | (reg_eb4 & 0x03ff0000) | ((reg_ebc >> 16) & 0x03ff); rtl8xxxu_write32(priv, REG_TX_IQK, val32); /* Modify RX IQK mode table */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(1), 1); rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_GAIN_P1, 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(11), 1); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_PAD_TXG, 0x003ff, 0x1e0); rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xccf000c0); rtl8xxxu_write32(priv, REG_ANAPWR1, 0x44ffbb44); 
rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00400040); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x6f005403); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000804e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x04203400); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000000); rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x808000); /* Path B IQK setting */ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x18008c1c); rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82170000); rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28170000); /* IQK setting */ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); /* LO calibration setting */ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); /* One shot, path A LOK & IQK */ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa005800); rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8005800); mdelay(15); ktime = 0; while (rtl8xxxu_read32(priv, REG_IQK_RPT_RXB) == 0 && ktime < 21) { mdelay(5); ktime += 5; } reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(11), 0); rtl8xxxu_write_rfreg_mask(priv, RF_B, RF6052_REG_GAIN_CCA, BIT(1), 0); rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_GAIN_P1, 0x02000); if (!(reg_eac & BIT(30)) && ((reg_ec4 & 0x03ff0000) != 0x01320000) && ((reg_ecc & 0x03ff0000) != 0x00360000)) result |= 0x02; else dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n", __func__); return result; } static void rtl8192fu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { static const u32 adda_regs[2] = { REG_ANAPWR1, REG_RX_WAIT_CCA }; static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { REG_TXPAUSE, REG_BEACON_CTRL, REG_BEACON_CTRL_1, REG_GPIO_MUXCFG }; static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, REG_DPDT_CTRL, REG_RFE_CTRL_ANTA_SRC, REG_RFE_CTRL_ANT_SRC2, REG_CCK0_AFE_SETTING }; u32 rx_initial_gain_a, rx_initial_gain_b; struct device *dev = &priv->udev->dev; int path_a_ok, path_b_ok; u8 rfe = priv->rfe_type; int retry = 2; u32 i, val32; /* * Note: IQ calibration must be performed after loading * PHY_REG.txt , and radio_a, radio_b.txt */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rx_initial_gain_a = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); rx_initial_gain_b = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1); if (t == 0) { /* Save ADDA parameters, turn Path A ADDA on */ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, ARRAY_SIZE(adda_regs)); rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); rtl8xxxu_save_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); } /* Instead of rtl8xxxu_path_adda_on */ rtl8xxxu_write32_set(priv, REG_FPGA0_XCD_RF_PARM, BIT(31)); /* MAC settings */ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); rtl8xxxu_write8_clear(priv, REG_GPIO_MUXCFG, GPIO_MUXCFG_IO_SEL_ENBT); if (rfe == 7 || rfe == 8 || rfe == 9 || rfe == 12) { /* in ePA IQK, rfe_func_config & SW both pull down */ /* path A */ rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANTA_SRC, 0xF, 0x7); rtl8xxxu_write32_mask(priv, 
REG_DPDT_CTRL, 0x1, 0x0); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANTA_SRC, 0xF00, 0x7); rtl8xxxu_write32_mask(priv, REG_DPDT_CTRL, 0x4, 0x0); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANTA_SRC, 0xF000, 0x7); rtl8xxxu_write32_mask(priv, REG_DPDT_CTRL, 0x8, 0x0); /* path B */ rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC2, 0xF0, 0x7); rtl8xxxu_write32_mask(priv, REG_DPDT_CTRL, 0x20000, 0x0); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC2, 0xF0000, 0x7); rtl8xxxu_write32_mask(priv, REG_DPDT_CTRL, 0x100000, 0x0); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC3, 0xF000, 0x7); rtl8xxxu_write32_mask(priv, REG_DPDT_CTRL, 0x8000000, 0x0); } if (priv->rf_paths > 1) { /* path B standby */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x000000); rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000); rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0x808000); } for (i = 0; i < retry; i++) { path_a_ok = rtl8192fu_iqk_path_a(priv); if (path_a_ok == 0x01) { val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); result[t][0] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); result[t][1] = (val32 >> 16) & 0x3ff; break; } else { result[t][0] = 0x100; result[t][1] = 0x0; } } for (i = 0; i < retry; i++) { path_a_ok = rtl8192fu_rx_iqk_path_a(priv); if (path_a_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); result[t][2] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); result[t][3] = (val32 >> 16) & 0x3ff; break; } else { result[t][2] = 0x100; result[t][3] = 0x0; } } if (!path_a_ok) dev_warn(dev, "%s: Path A IQK failed!\n", __func__); if (priv->rf_paths > 1) { for (i = 0; i < retry; i++) { path_b_ok = rtl8192fu_iqk_path_b(priv); if (path_b_ok == 0x01) { val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); result[t][4] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); result[t][5] = (val32 >> 16) & 0x3ff; break; } else { result[t][4] = 0x100; result[t][5] = 0x0; } } for (i = 0; i < retry; i++) { path_b_ok = rtl8192fu_rx_iqk_path_b(priv); if (path_b_ok == 0x03) { val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); result[t][6] = (val32 >> 16) & 0x3ff; val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); result[t][7] = (val32 >> 16) & 0x3ff; break; } else { result[t][6] = 0x100; result[t][7] = 0x0; } } if (!path_b_ok) dev_warn(dev, "%s: Path B IQK failed!\n", __func__); } /* Back to BB mode, load original value */ rtl8xxxu_write32_mask(priv, REG_FPGA0_IQK, 0xffffff00, 0); rtl8xxxu_write32(priv, REG_FPGA0_ANALOG4, 0xcc0000c0); rtl8xxxu_write32(priv, REG_ANAPWR1, 0x44bbbb44); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x80408040); rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x6f005433); rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000004e4); rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x04003400); rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100); /* Reload ADDA power saving parameters */ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, ARRAY_SIZE(adda_regs)); /* Reload MAC parameters */ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); /* Reload BB parameters */ rtl8xxxu_restore_regs(priv, iqk_bb_regs, priv->bb_backup, RTL8XXXU_BB_REGS); rtl8xxxu_write32_clear(priv, REG_FPGA0_XCD_RF_PARM, BIT(31)); /* Restore RX initial gain */ rtl8xxxu_write32_mask(priv, REG_OFDM0_XA_AGC_CORE1, 0xff, 0x50); rtl8xxxu_write32_mask(priv, REG_OFDM0_XA_AGC_CORE1, 0xff, rx_initial_gain_a & 0xff); if (priv->rf_paths > 1) { 
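		/* Also restore the path B RX initial gain */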
		rtl8xxxu_write32_mask(priv, REG_OFDM0_XB_AGC_CORE1, 0xff, 0x50);
		rtl8xxxu_write32_mask(priv, REG_OFDM0_XB_AGC_CORE1, 0xff,
				      rx_initial_gain_b & 0xff);
	}
}

static void rtl8192fu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
{
	s32 reg_e94, reg_e9c, reg_ea4, reg_eac;
	s32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
	struct device *dev = &priv->udev->dev;
	u32 path_a_0xdf, path_a_0x35;
	u32 path_b_0xdf, path_b_0x35;
	bool path_a_ok, path_b_ok;
	u8 rfe = priv->rfe_type;
	u32 rfe_path_select;
	int result[4][8]; /* last is final result */
	int i, candidate;
	s32 reg_tmp = 0;
	bool simu;
	u32 val32;

	rfe_path_select = rtl8xxxu_read32(priv, REG_RFE_PATH_SELECT);

	path_a_0xdf = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA);
	path_a_0x35 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_GAIN_P1);
	path_b_0xdf = rtl8xxxu_read_rfreg(priv, RF_B, RF6052_REG_GAIN_CCA);
	path_b_0x35 = rtl8xxxu_read_rfreg(priv, RF_B, RF6052_REG_GAIN_P1);

	memset(result, 0, sizeof(result));
	candidate = -1;

	path_a_ok = false;
	path_b_ok = false;

	for (i = 0; i < 3; i++) {
		rtl8192fu_phy_iqcalibrate(priv, result, i);

		if (i == 1) {
			simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 1);
			if (simu) {
				candidate = 0;
				break;
			}
		}

		if (i == 2) {
			simu = rtl8xxxu_gen2_simularity_compare(priv, result, 0, 2);
			if (simu) {
				candidate = 0;
				break;
			}

			simu = rtl8xxxu_gen2_simularity_compare(priv, result, 1, 2);
			if (simu) {
				candidate = 1;
			} else {
				for (i = 0; i < 8; i++)
					reg_tmp += result[3][i];

				if (reg_tmp)
					candidate = 3;
				else
					candidate = -1;
			}
		}
	}

	if (candidate >= 0) {
		reg_e94 = result[candidate][0];
		reg_e9c = result[candidate][1];
		reg_ea4 = result[candidate][2];
		reg_eac = result[candidate][3];
		reg_eb4 = result[candidate][4];
		reg_ebc = result[candidate][5];
		reg_ec4 = result[candidate][6];
		reg_ecc = result[candidate][7];

		dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
		dev_dbg(dev,
			"%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n",
			__func__, reg_e94, reg_e9c, reg_ea4, reg_eac,
			reg_eb4, reg_ebc, reg_ec4, reg_ecc);

		path_a_ok = true;
		path_b_ok = true;
	}

	rtl8xxxu_write32_mask(priv, REG_TX_IQK_TONE_A, 0x3ff00000, 0x100);
	rtl8xxxu_write32_mask(priv, REG_NP_ANTA, 0x3ff, 0);
	rtl8xxxu_write32_mask(priv, REG_TX_IQK_TONE_B, 0x3ff00000, 0x100);
	rtl8xxxu_write32_mask(priv, REG_TAP_UPD_97F, 0x3ff, 0);

	if (candidate >= 0) {
		if (reg_e94)
			rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
						   candidate, (reg_ea4 == 0));

		if (reg_eb4)
			rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
						   candidate, (reg_ec4 == 0));
	}

	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_CCA, path_a_0xdf);
	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_GAIN_P1, path_a_0x35);
	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_GAIN_CCA, path_b_0xdf);
	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_GAIN_P1, path_b_0x35);

	if (rfe == 7 || rfe == 8 || rfe == 9 || rfe == 12) {
		rtl8xxxu_write32_set(priv, REG_SW_GPIO_SHARE_CTRL_1, 0x70000);
		rtl8xxxu_write32_clear(priv, REG_LEDCFG0, 0x6c00000);
		rtl8xxxu_write32_set(priv, REG_PAD_CTRL1, BIT(29) | BIT(28));
		rtl8xxxu_write32_clear(priv, REG_SW_GPIO_SHARE_CTRL_0,
				       0x600000 | BIT(4));

		/*
		 * Originally:
		 * odm_set_bb_reg(dm, R_0x944, BIT(11) | 0x1F, 0x3F);
		 *
		 * It clears bit 11 and sets bits 0..4. The mask doesn't cover
		 * bit 5 so it's not modified. Is that what it's supposed to
		 * accomplish?
*/ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 &= ~BIT(11); val32 |= 0x1f; rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); if (rfe == 7) { rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANTA_SRC, 0xfffff, 0x23200); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC2, 0xfffff, 0x23200); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC1, 0xf000, 0x3); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC3, 0xf000, 0x3); } else { rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANTA_SRC, 0xfffff, 0x22200); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC2, 0xfffff, 0x22200); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC1, 0xf000, 0x2); rtl8xxxu_write32_mask(priv, REG_RFE_CTRL_ANT_SRC3, 0xf000, 0x2); } rtl8xxxu_write32_clear(priv, REG_RFE_OPT62, BIT(2)); if (rfe == 7) rtl8xxxu_write32(priv, REG_RFE_OPT, 0x03000003); rtl8xxxu_write32(priv, REG_RFE_PATH_SELECT, rfe_path_select); } } static void rtl8192fu_disabled_to_emu(struct rtl8xxxu_priv *priv) { rtl8xxxu_write16_clear(priv, REG_APS_FSMCO, APS_FSMCO_HW_POWERDOWN | APS_FSMCO_HW_SUSPEND); rtl8xxxu_write32_clear(priv, REG_GPIO_INTM, BIT(16)); rtl8xxxu_write16_clear(priv, REG_APS_FSMCO, APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND); } static int rtl8192fu_emu_to_active(struct rtl8xxxu_priv *priv) { u32 val32; u16 val16; int count; /* enable LDOA12 MACRO block for all interface */ rtl8xxxu_write8_set(priv, REG_LDOA15_CTRL, LDOA15_ENABLE); /* disable BT_GPS_SEL pins */ rtl8xxxu_write32_clear(priv, REG_PAD_CTRL1, BIT(28)); mdelay(1); /* release analog Ips to digital */ rtl8xxxu_write8_clear(priv, REG_SYS_ISO_CTRL, SYS_ISO_ANALOG_IPS); val16 = APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND | APS_FSMCO_SW_LPS; rtl8xxxu_write16_clear(priv, REG_APS_FSMCO, val16); /* wait till 0x04[17] = 1 power ready */ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if (val32 & BIT(17)) break; udelay(10); } if (!count) return -EBUSY; rtl8xxxu_write32_set(priv, REG_APS_FSMCO, APS_FSMCO_WLON_RESET); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & (APS_FSMCO_MAC_ENABLE | APS_FSMCO_MAC_OFF)) == 0) break; udelay(10); } if (!count) return -EBUSY; /* SWR OCP enable */ rtl8xxxu_write32_set(priv, REG_AFE_MISC, BIT(18)); rtl8xxxu_write16_clear(priv, REG_APS_FSMCO, APS_FSMCO_HW_POWERDOWN); rtl8xxxu_write16_clear(priv, REG_APS_FSMCO, APS_FSMCO_PCIE | APS_FSMCO_HW_SUSPEND); /* 0x7c[31]=1, LDO has max output capability */ rtl8xxxu_write32_set(priv, REG_LDO_SW_CTRL, BIT(31)); rtl8xxxu_write16_set(priv, REG_APS_FSMCO, APS_FSMCO_MAC_ENABLE); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) break; udelay(10); } if (!count) return -EBUSY; /* Enable WL control XTAL setting */ rtl8xxxu_write8_set(priv, REG_AFE_MISC, AFE_MISC_WL_XTAL_CTRL); /* Enable falling edge triggering interrupt */ rtl8xxxu_write16_set(priv, REG_GPIO_INTM, GPIO_INTM_EDGE_TRIG_IRQ); /* Enable GPIO9 data mode */ rtl8xxxu_write16_clear(priv, REG_GPIO_IO_SEL_2, GPIO_IO_SEL_2_GPIO09_IRQ); /* Enable GPIO9 input mode */ rtl8xxxu_write16_clear(priv, REG_GPIO_IO_SEL_2, GPIO_IO_SEL_2_GPIO09_INPUT); /* Enable HSISR GPIO[C:0] interrupt */ rtl8xxxu_write8_set(priv, REG_HSIMR, BIT(0)); /* RF HW ON/OFF Enable */ rtl8xxxu_write8_clear(priv, REG_MULTI_FUNC_CTRL, MULTI_WIFI_HW_ROF_EN); /* Register Lock Disable */ rtl8xxxu_write8_set(priv, REG_RSV_CTRL, BIT(7)); /* For GPIO9 internal pull high setting */ rtl8xxxu_write16_set(priv, REG_MULTI_FUNC_CTRL, 
BIT(14)); /* reset RF path S1 */ rtl8xxxu_write8(priv, REG_RF_CTRL, 0); /* reset RF path S0 */ rtl8xxxu_write8(priv, REG_AFE_CTRL4 + 3, 0); /* enable RF path S1 */ rtl8xxxu_write8(priv, REG_RF_CTRL, RF_SDMRSTB | RF_RSTB | RF_ENABLE); /* enable RF path S0 */ rtl8xxxu_write8(priv, REG_AFE_CTRL4 + 3, RF_SDMRSTB | RF_RSTB | RF_ENABLE); /* AFE_Ctrl */ rtl8xxxu_write8_set(priv, REG_RSVD_1, BIT(5)); /* AFE_Ctrl */ rtl8xxxu_write8(priv, REG_RSVD_4, 0xcc); /* AFE_Ctrl 0x24[4:3]=00 for xtal gmn */ rtl8xxxu_write8_clear(priv, REG_AFE_XTAL_CTRL, BIT(4) | BIT(3)); /* GPIO_A[31:0] Pull down software register */ rtl8xxxu_write32(priv, REG_GPIO_A0, 0xffffffff); /* GPIO_B[7:0] Pull down software register */ rtl8xxxu_write8(priv, REG_GPIO_B0, 0xff); /* Register Lock Enable */ rtl8xxxu_write8_clear(priv, REG_RSV_CTRL, BIT(7)); return 0; } static int rtl8192fu_active_to_emu(struct rtl8xxxu_priv *priv) { u32 val32; int count; /* Reset BB, RF enter Power Down mode */ rtl8xxxu_write8_clear(priv, REG_SYS_FUNC, SYS_FUNC_BBRSTB); /* Enable rising edge triggering interrupt */ rtl8xxxu_write16_clear(priv, REG_GPIO_INTM, GPIO_INTM_EDGE_TRIG_IRQ); /* release WLON reset */ rtl8xxxu_write32_set(priv, REG_APS_FSMCO, APS_FSMCO_WLON_RESET); /* turn off MAC by HW state machine */ rtl8xxxu_write16_set(priv, REG_APS_FSMCO, APS_FSMCO_MAC_OFF); for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); if ((val32 & APS_FSMCO_MAC_OFF) == 0) break; udelay(10); } if (!count) return -EBUSY; /* analog Ips to digital, 1:isolation */ rtl8xxxu_write8_set(priv, REG_SYS_ISO_CTRL, SYS_ISO_ANALOG_IPS); /* disable LDOA12 MACRO block */ rtl8xxxu_write8_clear(priv, REG_LDOA15_CTRL, LDOA15_ENABLE); return 0; } static int rtl8192fu_emu_to_disabled(struct rtl8xxxu_priv *priv) { u16 val16; /* SOP option to disable BG/MB */ rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x20); /* 0x04[12:11] = 2b'01 enable WL suspend */ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO); val16 &= ~APS_FSMCO_PCIE; val16 |= APS_FSMCO_HW_SUSPEND; rtl8xxxu_write16(priv, REG_APS_FSMCO, val16); /* enable GPIO9 as EXT WAKEUP */ rtl8xxxu_write32_set(priv, REG_GPIO_INTM, BIT(16)); return 0; } static int rtl8192fu_active_to_lps(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; u16 val16; u32 val32; int retry; /* Tx Pause */ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); retry = 100; /* Poll 32 bit wide REG_SCH_TX_CMD for 0 to ensure no TX is pending. 
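 * With retry = 100 and udelay(10) between reads, this waits at most
 * roughly 1 ms before the flush is reported as failed.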
*/ do { val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD); if (!val32) break; udelay(10); } while (retry--); if (!retry) { dev_warn(dev, "%s: Failed to flush TX queue\n", __func__); return -EBUSY; } /* Disable CCK and OFDM, clock gated */ rtl8xxxu_write8_clear(priv, REG_SYS_FUNC, SYS_FUNC_BBRSTB); udelay(2); /* Whole BB is reset */ rtl8xxxu_write8_clear(priv, REG_SYS_FUNC, SYS_FUNC_BB_GLB_RSTN); /* Reset MAC TRX */ val16 = rtl8xxxu_read16(priv, REG_CR); val16 &= 0xff00; val16 |= CR_HCI_RXDMA_ENABLE | CR_HCI_TXDMA_ENABLE; val16 &= ~CR_SECURITY_ENABLE; rtl8xxxu_write16(priv, REG_CR, val16); /* Respond TxOK to scheduler */ rtl8xxxu_write8_set(priv, REG_DUAL_TSF_RST, DUAL_TSF_TX_OK); return 0; } static int rtl8192fu_power_on(struct rtl8xxxu_priv *priv) { u16 val16; int ret; rtl8xxxu_write8(priv, REG_USB_ACCESS_TIMEOUT, 0x80); rtl8192fu_disabled_to_emu(priv); ret = rtl8192fu_emu_to_active(priv); if (ret) return ret; rtl8xxxu_write16(priv, REG_CR, 0); val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE; rtl8xxxu_write16(priv, REG_CR, val16); return 0; } static void rtl8192fu_power_off(struct rtl8xxxu_priv *priv) { rtl8xxxu_flush_fifo(priv); /* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */ rtl8xxxu_write8_clear(priv, REG_TX_REPORT_CTRL, TX_REPORT_CTRL_TIMER_ENABLE); /* stop rx */ rtl8xxxu_write8(priv, REG_CR, 0x00); rtl8192fu_active_to_lps(priv); /* Reset Firmware if running in RAM */ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL) rtl8xxxu_firmware_self_reset(priv); /* Reset MCU */ rtl8xxxu_write16_clear(priv, REG_SYS_FUNC, SYS_FUNC_CPU_ENABLE); /* Reset MCU ready status */ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); rtl8192fu_active_to_emu(priv); rtl8192fu_emu_to_disabled(priv); } static void rtl8192f_reset_8051(struct rtl8xxxu_priv *priv) { rtl8xxxu_write8_clear(priv, REG_RSV_CTRL, BIT(1)); rtl8xxxu_write8_clear(priv, REG_RSV_CTRL + 1, BIT(0)); rtl8xxxu_write16_clear(priv, REG_SYS_FUNC, SYS_FUNC_CPU_ENABLE); rtl8xxxu_write8_clear(priv, REG_RSV_CTRL, BIT(1)); rtl8xxxu_write8_set(priv, REG_RSV_CTRL + 1, BIT(0)); rtl8xxxu_write16_set(priv, REG_SYS_FUNC, SYS_FUNC_CPU_ENABLE); } static void rtl8192f_enable_rf(struct rtl8xxxu_priv *priv) { u32 val32; rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB); val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK); val32 |= OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B | OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B; rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } static void rtl8192f_disable_rf(struct rtl8xxxu_priv *priv) { u32 val32; val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); val32 &= ~OFDM_RF_PATH_TX_MASK; rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32); /* Power down RF module */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0); } static void rtl8192f_usb_quirks(struct rtl8xxxu_priv *priv) { u16 val16; rtl8xxxu_gen2_usb_quirks(priv); val16 = rtl8xxxu_read16(priv, REG_CR); val16 |= (CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE); rtl8xxxu_write16(priv, REG_CR, val16); } #define XTAL1 GENMASK(6, 1) #define XTAL0 GENMASK(30, 25) static void rtl8192f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap) { struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking; u32 xtal1, xtal0; if (crystal_cap == cfo->crystal_cap) return; xtal1 = rtl8xxxu_read32(priv, 
REG_AFE_PLL_CTRL); xtal0 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); dev_dbg(&priv->udev->dev, "%s: Adjusting crystal cap from 0x%x (actually 0x%x 0x%x) to 0x%x\n", __func__, cfo->crystal_cap, u32_get_bits(xtal1, XTAL1), u32_get_bits(xtal0, XTAL0), crystal_cap); u32p_replace_bits(&xtal1, crystal_cap, XTAL1); u32p_replace_bits(&xtal0, crystal_cap, XTAL0); rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, xtal1); rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, xtal0); cfo->crystal_cap = crystal_cap; } static s8 rtl8192f_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_stats *phy_stats) { struct jaguar2_phy_stats_type0 *phy_stats0 = (struct jaguar2_phy_stats_type0 *)phy_stats; u8 lna_idx = (phy_stats0->lna_h << 3) | phy_stats0->lna_l; u8 vga_idx = phy_stats0->vga; s8 rx_pwr_all; switch (lna_idx) { case 7: rx_pwr_all = -44 - (2 * vga_idx); break; case 5: rx_pwr_all = -28 - (2 * vga_idx); break; case 3: rx_pwr_all = -10 - (2 * vga_idx); break; case 0: rx_pwr_all = 14 - (2 * vga_idx); break; default: rx_pwr_all = 0; break; } return rx_pwr_all; } static int rtl8192fu_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rtl8xxxu_priv *priv = container_of(led_cdev, struct rtl8xxxu_priv, led_cdev); u32 ledcfg; /* Values obtained by observing the USB traffic from the Windows driver. */ rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_0, 0x20080); rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_1, 0x1b0000); ledcfg = rtl8xxxu_read32(priv, REG_LEDCFG0); /* Comfast CF-826F uses LED1. Asus USB-N13 C1 uses LED0. Set both. */ u32p_replace_bits(&ledcfg, LED_GPIO_ENABLE, LEDCFG0_LED2EN); u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED0_IO_MODE); u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED1_IO_MODE); if (brightness == LED_OFF) { u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM); u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV); u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM); u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV); } else if (brightness == LED_ON) { u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM); u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED0SV); u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM); u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED1SV); } else if (brightness == RTL8XXXU_HW_LED_CONTROL) { u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS, LEDCFG0_LED0CM); u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV); u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS, LEDCFG0_LED1CM); u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV); } rtl8xxxu_write32(priv, REG_LEDCFG0, ledcfg); return 0; } struct rtl8xxxu_fileops rtl8192fu_fops = { .identify_chip = rtl8192fu_identify_chip, .parse_efuse = rtl8192fu_parse_efuse, .load_firmware = rtl8192fu_load_firmware, .power_on = rtl8192fu_power_on, .power_off = rtl8192fu_power_off, .read_efuse = rtl8xxxu_read_efuse, .reset_8051 = rtl8192f_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .init_phy_bb = rtl8192fu_init_phy_bb, .init_phy_rf = rtl8192fu_init_phy_rf, .phy_lc_calibrate = rtl8192f_phy_lc_calibrate, .phy_iq_calibrate = rtl8192fu_phy_iq_calibrate, .config_channel = rtl8192fu_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc24, .parse_phystats = jaguar2_rx_parse_phystats, .init_aggregation = rtl8192fu_init_aggregation, .init_burst = rtl8xxxu_init_burst, .enable_rf = rtl8192f_enable_rf, .disable_rf = rtl8192f_disable_rf, .usb_quirks = rtl8192f_usb_quirks, .set_tx_power = rtl8192f_set_tx_power, .update_rate_mask = 
rtl8xxxu_gen2_update_rate_mask, .report_connect = rtl8xxxu_gen2_report_connect, .report_rssi = rtl8xxxu_gen2_report_rssi, .fill_txdesc = rtl8xxxu_fill_txdesc_v2, .set_crystal_cap = rtl8192f_set_crystal_cap, .cck_rssi = rtl8192f_cck_rssi, .led_classdev_brightness_set = rtl8192fu_led_brightness_set, .writeN_block_size = 254, .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24), .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), .has_tx_report = 1, .gen2_thermal_meter = 1, .needs_full_init = 1, .init_reg_rxfltmap = 1, .init_reg_pkt_life_time = 1, .init_reg_hmtfr = 1, .ampdu_max_time = 0x5e, .ustime_tsf_edca = 0x50, .max_aggr_num = 0x1f1f, .supports_ap = 1, .max_macid_num = 128, .max_sec_cam_num = 64, .trxff_boundary = 0x3f3f, .pbp_rx = PBP_PAGE_SIZE_256, .pbp_tx = PBP_PAGE_SIZE_256, .mactable = rtl8192f_mac_init_table, .total_page_num = TX_TOTAL_PAGE_NUM_8192F, .page_num_hi = TX_PAGE_NUM_HI_PQ_8192F, .page_num_lo = TX_PAGE_NUM_LO_PQ_8192F, .page_num_norm = TX_PAGE_NUM_NORM_PQ_8192F, }; |
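/*
 * Illustrative sketch (not from the driver above): rtl8192f_cck_rssi() turns
 * the reported AGC state (LNA index and VGA index) into a coarse receive
 * power estimate, where each VGA step subtracts 2 dB from an LNA-dependent
 * base level. A standalone version of that arithmetic, for illustration only;
 * the base values simply mirror the switch statement and are not a calibrated
 * power model.
 */
#include <stdio.h>

static int example_cck_rssi(unsigned int lna_idx, unsigned int vga_idx)
{
	switch (lna_idx) {
	case 7:  return -44 - 2 * (int)vga_idx;
	case 5:  return -28 - 2 * (int)vga_idx;
	case 3:  return -10 - 2 * (int)vga_idx;
	case 0:  return  14 - 2 * (int)vga_idx;
	default: return 0;	/* unexpected LNA index */
	}
}

int main(void)
{
	/* e.g. LNA index 7 with VGA index 10 -> -64 (treated as dBm upstream) */
	printf("rx_pwr_all = %d\n", example_cck_rssi(7, 10));
	return 0;
}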
13 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 | // SPDX-License-Identifier: GPL-2.0-only /* * cfg80211 debugfs * * Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2023 Intel Corporation */ #include <linux/slab.h> #include "core.h" #include "debugfs.h" #define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct wiphy *wiphy = file->private_data; \ char buf[buflen]; \ int res; \ \ res = scnprintf(buf, buflen, fmt "\n", ##value); \ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ } \ \ static const struct file_operations name## _ops = { \ .read = name## _read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ } DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", wiphy->rts_threshold); DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", wiphy->frag_threshold); DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", wiphy->retry_short); DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", wiphy->retry_long); static int ht_print_chan(struct ieee80211_channel *chan, char *buf, int buf_size, int offset) { if (WARN_ON(offset > buf_size)) return 0; if (chan->flags & IEEE80211_CHAN_DISABLED) return scnprintf(buf + offset, buf_size - offset, "%d Disabled\n", chan->center_freq); return scnprintf(buf + offset, buf_size - offset, "%d HT40 %c%c\n", chan->center_freq, (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-', (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? 
' ' : '+'); } static ssize_t ht40allow_map_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct wiphy *wiphy = file->private_data; char *buf; unsigned int offset = 0, buf_size = PAGE_SIZE, i; enum nl80211_band band; struct ieee80211_supported_band *sband; ssize_t r; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) offset += ht_print_chan(&sband->channels[i], buf, buf_size, offset); } r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); kfree(buf); return r; } static const struct file_operations ht40allow_map_ops = { .read = ht40allow_map_read, .open = simple_open, .llseek = default_llseek, }; #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops) void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) { struct dentry *phyd = rdev->wiphy.debugfsdir; DEBUGFS_ADD(rts_threshold); DEBUGFS_ADD(fragmentation_threshold); DEBUGFS_ADD(short_retry_limit); DEBUGFS_ADD(long_retry_limit); DEBUGFS_ADD(ht40allow_map); } struct debugfs_read_work { struct wiphy_work work; ssize_t (*handler)(struct wiphy *wiphy, struct file *file, char *buf, size_t count, void *data); struct wiphy *wiphy; struct file *file; char *buf; size_t bufsize; void *data; ssize_t ret; struct completion completion; }; static void wiphy_locked_debugfs_read_work(struct wiphy *wiphy, struct wiphy_work *work) { struct debugfs_read_work *w = container_of(work, typeof(*w), work); w->ret = w->handler(w->wiphy, w->file, w->buf, w->bufsize, w->data); complete(&w->completion); } static void wiphy_locked_debugfs_read_cancel(struct dentry *dentry, void *data) { struct debugfs_read_work *w = data; wiphy_work_cancel(w->wiphy, &w->work); complete(&w->completion); } ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*handler)(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, void *data), void *data) { struct debugfs_read_work work = { .handler = handler, .wiphy = wiphy, .file = file, .buf = buf, .bufsize = bufsize, .data = data, .ret = -ENODEV, .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), }; struct debugfs_cancellation cancellation = { .cancel = wiphy_locked_debugfs_read_cancel, .cancel_data = &work, }; /* don't leak stack data or whatever */ memset(buf, 0, bufsize); wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work); wiphy_work_queue(wiphy, &work.work); debugfs_enter_cancellation(file, &cancellation); wait_for_completion(&work.completion); debugfs_leave_cancellation(file, &cancellation); if (work.ret < 0) return work.ret; if (WARN_ON(work.ret > bufsize)) return -EINVAL; return simple_read_from_buffer(userbuf, count, ppos, buf, work.ret); } EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_read); struct debugfs_write_work { struct wiphy_work work; ssize_t (*handler)(struct wiphy *wiphy, struct file *file, char *buf, size_t count, void *data); struct wiphy *wiphy; struct file *file; char *buf; size_t count; void *data; ssize_t ret; struct completion completion; }; static void wiphy_locked_debugfs_write_work(struct wiphy *wiphy, struct wiphy_work *work) { struct debugfs_write_work *w = container_of(work, typeof(*w), work); w->ret = w->handler(w->wiphy, w->file, w->buf, w->count, w->data); complete(&w->completion); } static void 
wiphy_locked_debugfs_write_cancel(struct dentry *dentry, void *data) { struct debugfs_write_work *w = data; wiphy_work_cancel(w->wiphy, &w->work); complete(&w->completion); } ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, const char __user *userbuf, size_t count, ssize_t (*handler)(struct wiphy *wiphy, struct file *file, char *buf, size_t count, void *data), void *data) { struct debugfs_write_work work = { .handler = handler, .wiphy = wiphy, .file = file, .buf = buf, .count = count, .data = data, .ret = -ENODEV, .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), }; struct debugfs_cancellation cancellation = { .cancel = wiphy_locked_debugfs_write_cancel, .cancel_data = &work, }; /* mostly used for strings so enforce NUL-termination for safety */ if (count >= bufsize) return -EINVAL; memset(buf, 0, bufsize); if (copy_from_user(buf, userbuf, count)) return -EFAULT; wiphy_work_init(&work.work, wiphy_locked_debugfs_write_work); wiphy_work_queue(wiphy, &work.work); debugfs_enter_cancellation(file, &cancellation); wait_for_completion(&work.completion); debugfs_leave_cancellation(file, &cancellation); return work.ret; } EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_write); |
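/*
 * Illustrative sketch (not from debugfs.c above): how a driver-side debugfs
 * read callback might use wiphy_locked_debugfs_read() so that its handler
 * runs from a wiphy work item, i.e. with the wiphy mutex held. "foo_dev",
 * its fields and the function names are hypothetical.
 */
#include <net/cfg80211.h>

struct foo_dev {
	struct wiphy *wiphy;
	int state;
};

static ssize_t foo_state_handler(struct wiphy *wiphy, struct file *file,
				 char *buf, size_t bufsize, void *data)
{
	struct foo_dev *foo = data;

	/* Called under the wiphy lock; safe to read driver state here. */
	return scnprintf(buf, bufsize, "state: %d\n", foo->state);
}

static ssize_t foo_state_read(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct foo_dev *foo = file->private_data;
	char buf[32];

	return wiphy_locked_debugfs_read(foo->wiphy, file, buf, sizeof(buf),
					 userbuf, count, ppos,
					 foo_state_handler, foo);
}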
2 2 2 2 1 1 2 2 2 6 6 6 3 4 2 2 2 2 4 6 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <linux/unaligned.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> /* * 'Thanko's Raremono' is a Japanese si4734-based AM/FM/SW USB receiver: * * http://www.raremono.jp/product/484.html/ * * The USB protocol has been reversed engineered using wireshark, initially * by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil * <hverkuil@xs4all.nl>. * * Sadly the firmware used in this product hides lots of goodies since the * si4734 has more features than are supported by the firmware. Oh well... */ /* driver and module definitions */ MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver"); MODULE_LICENSE("GPL v2"); /* * The Device announces itself as Cygnal Integrated Products, Inc. * * The vendor and product IDs (and in fact all other lsusb information as * well) are identical to the si470x Silicon Labs USB FM Radio Reference * Design board, even though this card has a si4734 device. Clearly the * designer of this product never bothered to change the USB IDs. */ /* USB Device ID List */ static const struct usb_device_id usb_raremono_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_raremono_device_table); #define BUFFER_LENGTH 64 /* Timeout is set to a high value, could probably be reduced. 
Need more tests */ #define USB_TIMEOUT 10000 /* Frequency limits in KHz */ #define FM_FREQ_RANGE_LOW 64000 #define FM_FREQ_RANGE_HIGH 108000 #define AM_FREQ_RANGE_LOW 520 #define AM_FREQ_RANGE_HIGH 1710 #define SW_FREQ_RANGE_LOW 2300 #define SW_FREQ_RANGE_HIGH 26100 enum { BAND_FM, BAND_AM, BAND_SW }; static const struct v4l2_frequency_band bands[] = { /* Band FM */ { .type = V4L2_TUNER_RADIO, .index = 0, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = FM_FREQ_RANGE_LOW * 16, .rangehigh = FM_FREQ_RANGE_HIGH * 16, .modulation = V4L2_BAND_MODULATION_FM, }, /* Band AM */ { .type = V4L2_TUNER_RADIO, .index = 1, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = AM_FREQ_RANGE_LOW * 16, .rangehigh = AM_FREQ_RANGE_HIGH * 16, .modulation = V4L2_BAND_MODULATION_AM, }, /* Band SW */ { .type = V4L2_TUNER_RADIO, .index = 2, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = SW_FREQ_RANGE_LOW * 16, .rangehigh = SW_FREQ_RANGE_HIGH * 16, .modulation = V4L2_BAND_MODULATION_AM, }, }; struct raremono_device { struct usb_device *usbdev; struct usb_interface *intf; struct video_device vdev; struct v4l2_device v4l2_dev; struct mutex lock; u8 *buffer; u32 band; unsigned curfreq; }; static inline struct raremono_device *to_raremono_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct raremono_device, v4l2_dev); } /* Set frequency. */ static int raremono_cmd_main(struct raremono_device *radio, unsigned band, unsigned freq) { unsigned band_offset; int ret; switch (band) { case BAND_FM: band_offset = 1; freq /= 10; break; case BAND_AM: band_offset = 0; break; default: band_offset = 2; break; } radio->buffer[0] = 0x04 + band_offset; radio->buffer[1] = freq >> 8; radio->buffer[2] = freq & 0xff; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), HID_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, 0x0300 + radio->buffer[0], 2, radio->buffer, 3, USB_TIMEOUT); if (ret < 0) { dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret); return ret; } radio->curfreq = (band == BAND_FM) ? freq * 10 : freq; return 0; } /* Handle unplugging the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_raremono_device_release. 
*/ static void usb_raremono_disconnect(struct usb_interface *intf) { struct raremono_device *radio = to_raremono_dev(usb_get_intfdata(intf)); dev_info(&intf->dev, "Thanko's Raremono disconnected\n"); mutex_lock(&radio->lock); usb_set_intfdata(intf, NULL); video_unregister_device(&radio->vdev); v4l2_device_disconnect(&radio->v4l2_dev); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); } /* * Linux Video interface */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct raremono_device *radio = video_drvdata(file); strscpy(v->driver, "radio-raremono", sizeof(v->driver)); strscpy(v->card, "Thanko's Raremono", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); return 0; } static int vidioc_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band) { if (band->tuner != 0) return -EINVAL; if (band->index >= ARRAY_SIZE(bands)) return -EINVAL; *band = bands[band->index]; return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct raremono_device *radio = video_drvdata(file); int ret; if (v->index > 0) return -EINVAL; strscpy(v->name, "AM/FM/SW", sizeof(v->name)); v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = AM_FREQ_RANGE_LOW * 16; v->rangehigh = FM_FREQ_RANGE_HIGH * 16; v->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; v->audmode = (radio->curfreq < FM_FREQ_RANGE_LOW) ? V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO; memset(radio->buffer, 1, BUFFER_LENGTH); ret = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 1, 0xa1, 0x030d, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret); return ret; } v->signal = ((radio->buffer[1] & 0xf) << 8 | radio->buffer[2]) << 4; return 0; } static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *v) { return v->index ? 
-EINVAL : 0; } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct raremono_device *radio = video_drvdata(file); u32 freq; unsigned band; if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; if (f->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8) band = BAND_FM; else if (f->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8) band = BAND_AM; else band = BAND_SW; freq = clamp_t(u32, f->frequency, bands[band].rangelow, bands[band].rangehigh); return raremono_cmd_main(radio, band, freq / 16); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct raremono_device *radio = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq * 16; return 0; } static void raremono_device_release(struct v4l2_device *v4l2_dev) { struct raremono_device *radio = to_raremono_dev(v4l2_dev); kfree(radio->buffer); kfree(radio); } /* File system interface */ static const struct v4l2_file_operations usb_raremono_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = v4l2_fh_release, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops usb_raremono_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_enum_freq_bands = vidioc_enum_freq_bands, }; /* check if the device is present and register with v4l and usb if it is */ static int usb_raremono_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct raremono_device *radio; int retval = 0; radio = kzalloc(sizeof(*radio), GFP_KERNEL); if (!radio) return -ENOMEM; radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); if (!radio->buffer) { kfree(radio); return -ENOMEM; } radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; /* * This device uses the same USB IDs as the si470x SiLabs reference * design. So do an additional check: attempt to read the device ID * from the si470x: the lower 12 bits are 0x0242 for the si470x. The * Raremono always returns 0x0800 (the meaning of that is unknown, but * at least it works). * * We use this check to determine which device we are dealing with. 
*/ msleep(20); retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), HID_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 1, 2, radio->buffer, 3, 500); if (retval != 3 || (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) { dev_info(&intf->dev, "this is not Thanko's Raremono.\n"); retval = -ENODEV; goto free_mem; } dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n", id->idVendor, id->idProduct); retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto free_mem; } mutex_init(&radio->lock); strscpy(radio->vdev.name, radio->v4l2_dev.name, sizeof(radio->vdev.name)); radio->vdev.v4l2_dev = &radio->v4l2_dev; radio->vdev.fops = &usb_raremono_fops; radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; radio->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO; radio->v4l2_dev.release = raremono_device_release; usb_set_intfdata(intf, &radio->v4l2_dev); video_set_drvdata(&radio->vdev, radio); raremono_cmd_main(radio, BAND_FM, 95160); retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1); if (retval == 0) { dev_info(&intf->dev, "V4L2 device registered as %s\n", video_device_node_name(&radio->vdev)); return 0; } dev_err(&intf->dev, "could not register video device\n"); v4l2_device_unregister(&radio->v4l2_dev); free_mem: kfree(radio->buffer); kfree(radio); return retval; } /* USB subsystem interface */ static struct usb_driver usb_raremono_driver = { .name = "radio-raremono", .probe = usb_raremono_probe, .disconnect = usb_raremono_disconnect, .id_table = usb_raremono_device_table, }; module_usb_driver(usb_raremono_driver); |
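/*
 * Illustrative sketch (not from radio-raremono above): tuning the receiver
 * from userspace. Because the tuner advertises V4L2_TUNER_CAP_LOW,
 * VIDIOC_S_FREQUENCY takes units of 1/16 kHz (62.5 Hz), so 95.16 MHz is
 * 95160 * 16. The /dev/radio0 node name is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency f;
	int fd = open("/dev/radio0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&f, 0, sizeof(f));
	f.tuner = 0;
	f.type = V4L2_TUNER_RADIO;
	f.frequency = 95160 * 16;	/* 95.16 MHz in 1/16 kHz units */

	if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
		perror("VIDIOC_S_FREQUENCY");

	close(fd);
	return 0;
}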
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
* * **************************************************************************/ /* * Authors: * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> */ #ifndef _DRM_MM_H_ #define _DRM_MM_H_ /* * Generic range manager structs */ #include <linux/bug.h> #include <linux/rbtree.h> #include <linux/limits.h> #include <linux/mm_types.h> #include <linux/list.h> #include <linux/spinlock.h> #ifdef CONFIG_DRM_DEBUG_MM #include <linux/stackdepot.h> #endif #include <linux/types.h> #include <drm/drm_print.h> #ifdef CONFIG_DRM_DEBUG_MM #define DRM_MM_BUG_ON(expr) BUG_ON(expr) #else #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif /** * enum drm_mm_insert_mode - control search and allocation behaviour * * The &struct drm_mm range manager supports finding a suitable modes using * a number of search trees. These trees are oranised by size, by address and * in most recent eviction order. This allows the user to find either the * smallest hole to reuse, the lowest or highest address to reuse, or simply * reuse the most recent eviction that fits. When allocating the &drm_mm_node * from within the hole, the &drm_mm_insert_mode also dictate whether to * allocate the lowest matching address or the highest. */ enum drm_mm_insert_mode { /** * @DRM_MM_INSERT_BEST: * * Search for the smallest hole (within the search range) that fits * the desired node. * * Allocates the node from the bottom of the found hole. */ DRM_MM_INSERT_BEST = 0, /** * @DRM_MM_INSERT_LOW: * * Search for the lowest hole (address closest to 0, within the search * range) that fits the desired node. * * Allocates the node from the bottom of the found hole. */ DRM_MM_INSERT_LOW, /** * @DRM_MM_INSERT_HIGH: * * Search for the highest hole (address closest to U64_MAX, within the * search range) that fits the desired node. * * Allocates the node from the *top* of the found hole. The specified * alignment for the node is applied to the base of the node * (&drm_mm_node.start). */ DRM_MM_INSERT_HIGH, /** * @DRM_MM_INSERT_EVICT: * * Search for the most recently evicted hole (within the search range) * that fits the desired node. This is appropriate for use immediately * after performing an eviction scan (see drm_mm_scan_init()) and * removing the selected nodes to form a hole. * * Allocates the node from the bottom of the found hole. */ DRM_MM_INSERT_EVICT, /** * @DRM_MM_INSERT_ONCE: * * Only check the first hole for suitablity and report -ENOSPC * immediately otherwise, rather than check every hole until a * suitable one is found. Can only be used in conjunction with another * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. */ DRM_MM_INSERT_ONCE = BIT(31), /** * @DRM_MM_INSERT_HIGHEST: * * Only check the highest hole (the hole with the largest address) and * insert the node at the top of the hole or report -ENOSPC if * unsuitable. * * Does not search all holes. */ DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, /** * @DRM_MM_INSERT_LOWEST: * * Only check the lowest hole (the hole with the smallest address) and * insert the node at the bottom of the hole or report -ENOSPC if * unsuitable. * * Does not search all holes. */ DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, }; /** * struct drm_mm_node - allocated block in the DRM allocator * * This represents an allocated block in a &drm_mm allocator. Except for * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is * entirely opaque and should only be accessed through the provided funcions. 
* Since allocation of these nodes is entirely handled by the driver they can be * embedded. */ struct drm_mm_node { /** @color: Opaque driver-private tag. */ unsigned long color; /** @start: Start address of the allocated block. */ u64 start; /** @size: Size of the allocated block. */ u64 size; /* private: */ struct drm_mm *mm; struct list_head node_list; struct list_head hole_stack; struct rb_node rb; struct rb_node rb_hole_size; struct rb_node rb_hole_addr; u64 __subtree_last; u64 hole_size; u64 subtree_max_hole; unsigned long flags; #define DRM_MM_NODE_ALLOCATED_BIT 0 #define DRM_MM_NODE_SCANNED_BIT 1 #ifdef CONFIG_DRM_DEBUG_MM depot_stack_handle_t stack; #endif }; /** * struct drm_mm - DRM allocator * * DRM range allocator with a few special functions and features geared towards * managing GPU memory. Except for the @color_adjust callback the structure is * entirely opaque and should only be accessed through the provided functions * and macros. This structure can be embedded into larger driver structures. */ struct drm_mm { /** * @color_adjust: * * Optional driver callback to further apply restrictions on a hole. The * node argument points at the node containing the hole from which the * block would be allocated (see drm_mm_hole_follows() and friends). The * other arguments are the size of the block to be allocated. The driver * can adjust the start and end as needed to e.g. insert guard pages. */ void (*color_adjust)(const struct drm_mm_node *node, unsigned long color, u64 *start, u64 *end); /* private: */ /* List of all memory nodes that immediately precede a free hole. */ struct list_head hole_stack; /* head_node.node_list is the list of all memory nodes, ordered * according to the (increasing) start address of the memory node. */ struct drm_mm_node head_node; /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ struct rb_root_cached interval_tree; struct rb_root_cached holes_size; struct rb_root holes_addr; unsigned long scan_active; }; /** * struct drm_mm_scan - DRM allocator eviction roaster data * * This structure tracks data needed for the eviction roaster set up using * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and * drm_mm_scan_remove_block(). The structure is entirely opaque and should only * be accessed through the provided functions and macros. It is meant to be * allocated temporarily by the driver on the stack. */ struct drm_mm_scan { /* private: */ struct drm_mm *mm; u64 size; u64 alignment; u64 remainder_mask; u64 range_start; u64 range_end; u64 hit_start; u64 hit_end; unsigned long color; enum drm_mm_insert_mode mode; }; /** * drm_mm_node_allocated - checks whether a node is allocated * @node: drm_mm_node to check * * Drivers are required to clear a node prior to using it with the * drm_mm range manager. * * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @node is allocated. */ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) { return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); } /** * drm_mm_initialized - checks whether an allocator is initialized * @mm: drm_mm to check * * Drivers should clear the struct drm_mm prior to initialisation if they * want to use this function. * * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @mm is initialized. 
*/ static inline bool drm_mm_initialized(const struct drm_mm *mm) { return READ_ONCE(mm->hole_stack.next); } /** * drm_mm_hole_follows - checks whether a hole follows this node * @node: drm_mm_node to check * * Holes are embedded into the drm_mm using the tail of a drm_mm_node. * If you wish to know whether a hole follows this particular node, * query this function. See also drm_mm_hole_node_start() and * drm_mm_hole_node_end(). * * Returns: * True if a hole follows the @node. */ static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) { return node->hole_size; } static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } /** * drm_mm_hole_node_start - computes the start of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * * This is useful for driver-specific debug dumpers. Otherwise drivers should * not inspect holes themselves. Drivers must check first whether a hole indeed * follows by looking at drm_mm_hole_follows() * * Returns: * Start of the subsequent hole. */ static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); return __drm_mm_hole_node_start(hole_node); } static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return list_next_entry(hole_node, node_list)->start; } /** * drm_mm_hole_node_end - computes the end of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * * This is useful for driver-specific debug dumpers. Otherwise drivers should * not inspect holes themselves. Drivers must check first whether a hole indeed * follows by looking at drm_mm_hole_follows(). * * Returns: * End of the subsequent hole. */ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } /** * drm_mm_nodes - list of nodes under the drm_mm range manager * @mm: the struct drm_mm range manager * * As the drm_mm range manager hides its node_list deep with its * structure, extracting it looks painful and repetitive. This is * not expected to be used outside of the drm_mm_for_each_node() * macros and similar internal functions. * * Returns: * The node list, may be empty. */ #define drm_mm_nodes(mm) (&(mm)->head_node.node_list) /** * drm_mm_for_each_node - iterator to walk over all allocated nodes * @entry: &struct drm_mm_node to assign to in each iteration step * @mm: &drm_mm allocator to walk * * This iterator walks over all nodes in the range allocator. It is implemented * with list_for_each(), so not save against removal of elements. */ #define drm_mm_for_each_node(entry, mm) \ list_for_each_entry(entry, drm_mm_nodes(mm), node_list) /** * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes * @entry: &struct drm_mm_node to assign to in each iteration step * @next: &struct drm_mm_node to store the next step * @mm: &drm_mm allocator to walk * * This iterator walks over all nodes in the range allocator. It is implemented * with list_for_each_safe(), so save against removal of elements. 
*/ #define drm_mm_for_each_node_safe(entry, next, mm) \ list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) /** * drm_mm_for_each_hole - iterator to walk over all holes * @pos: &drm_mm_node used internally to track progress * @mm: &drm_mm allocator to walk * @hole_start: ulong variable to assign the hole start to on each iteration * @hole_end: ulong variable to assign the hole end to on each iteration * * This iterator walks over all holes in the range allocator. It is implemented * with list_for_each(), so not save against removal of elements. @entry is used * internally and will not reflect a real drm_mm_node for the very first hole. * Hence users of this iterator may not access it. * * Implementation Note: * We need to inline list_for_each_entry in order to be able to set hole_start * and hole_end on each iteration while keeping the macro sane. */ #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ for (pos = list_first_entry(&(mm)->hole_stack, \ typeof(*pos), hole_stack); \ &pos->hole_stack != &(mm)->hole_stack ? \ hole_start = drm_mm_hole_node_start(pos), \ hole_end = hole_start + pos->hole_size, \ 1 : 0; \ pos = list_next_entry(pos, hole_stack)) /* * Basic range manager support (drm_mm.c) */ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color, u64 start, u64 end, enum drm_mm_insert_mode mode); /** * drm_mm_insert_node_generic - search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for this node * @mode: fine-tune the allocation search and placement * * This is a simplified version of drm_mm_insert_node_in_range() with no * range restrictions applied. * * The preallocated node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole. */ static inline int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color, enum drm_mm_insert_mode mode) { return drm_mm_insert_node_in_range(mm, node, size, alignment, color, 0, U64_MAX, mode); } /** * drm_mm_insert_node - search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation * * This is a simplified version of drm_mm_insert_node_generic() with @color set * to 0. * * The preallocated node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole. */ static inline int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, u64 size) { return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); } void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); void drm_mm_takedown(struct drm_mm *mm); /** * drm_mm_clean - checks whether an allocator is clean * @mm: drm_mm allocator to check * * Returns: * True if the allocator is completely free, false if there's still a node * allocated in it. 
*/ static inline bool drm_mm_clean(const struct drm_mm *mm) { return list_empty(drm_mm_nodes(mm)); } struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); /** * drm_mm_for_each_node_in_range - iterator to walk over a range of * allocated nodes * @node__: drm_mm_node structure to assign to in each iteration step * @mm__: drm_mm allocator to walk * @start__: starting offset, the first node will overlap this * @end__: ending offset, the last node will start before this (but may overlap) * * This iterator walks over all nodes in the range allocator that lie * between @start and @end. It is implemented similarly to list_for_each(), * but using the internal interval tree to accelerate the search for the * starting node, and so not safe against removal of elements. It assumes * that @end is within (or is the upper limit of) the drm_mm allocator. * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk * over the special _unallocated_ &drm_mm.head_node, and may even continue * indefinitely. */ #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \ for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \ node__->start < (end__); \ node__ = list_next_entry(node__, node_list)) void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, struct drm_mm *mm, u64 size, u64 alignment, unsigned long color, u64 start, u64 end, enum drm_mm_insert_mode mode); /** * drm_mm_scan_init - initialize lru scanning * @scan: scan state * @mm: drm_mm to scan * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for the allocation * @mode: fine-tune the allocation search and placement * * This is a simplified version of drm_mm_scan_init_with_range() with no range * restrictions applied. * * This simply sets up the scanning routines with the parameters for the desired * hole. * * Warning: * As long as the scan list is non-empty, no other operations than * adding/removing nodes to/from the scan list are allowed. */ static inline void drm_mm_scan_init(struct drm_mm_scan *scan, struct drm_mm *mm, u64 size, u64 alignment, unsigned long color, enum drm_mm_insert_mode mode) { drm_mm_scan_init_with_range(scan, mm, size, alignment, color, 0, U64_MAX, mode); } bool drm_mm_scan_add_block(struct drm_mm_scan *scan, struct drm_mm_node *node); bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, struct drm_mm_node *node); struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); #endif |
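/*
 * Illustrative sketch (not part of drm_mm.h above): the typical call flow for
 * the range manager declared in this header. Function and variable names are
 * hypothetical and error handling is reduced to the essentials.
 */
#include <drm/drm_mm.h>
#include <linux/string.h>

static void example_mm_setup(struct drm_mm *mm)
{
	/* Manage a 256 MiB range starting at offset 0. */
	drm_mm_init(mm, 0, 256 * 1024 * 1024);
}

static int example_mm_alloc(struct drm_mm *mm, struct drm_mm_node *node,
			    u64 size)
{
	int ret;

	/* Nodes must be cleared before being handed to drm_mm. */
	memset(node, 0, sizeof(*node));

	ret = drm_mm_insert_node_generic(mm, node, size, 4096 /* alignment */,
					 0 /* color */, DRM_MM_INSERT_BEST);
	if (ret)	/* -ENOSPC if no suitable hole exists */
		return ret;

	/* node->start now holds the offset assigned inside the managed range. */
	return 0;
}

static void example_mm_teardown(struct drm_mm *mm, struct drm_mm_node *node)
{
	if (drm_mm_node_allocated(node))
		drm_mm_remove_node(node);
	/* All nodes must have been removed before takedown. */
	drm_mm_takedown(mm);
}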
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID driver for VRC-2 2-axis Car controller
 *
 * Copyright (C) 2022 Marcus Folkesson <marcus.folkesson@gmail.com>
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

/*
 * VID/PID are probably "borrowed", so keep them locally and
 * do not populate hid-ids.h with those.
 */
#define USB_VENDOR_ID_VRC2	(0x07c0)
#define USB_DEVICE_ID_VRC2	(0x1125)

static const __u8 vrc2_rdesc_fixed[] = {
	0x05, 0x01,        // Usage Page (Generic Desktop Ctrls)
	0x09, 0x04,        // Usage (Joystick)
	0xA1, 0x01,        // Collection (Application)
	0x09, 0x01,        // Usage (Pointer)
	0xA1, 0x00,        // Collection (Physical)
	0x09, 0x30,        // Usage (X)
	0x09, 0x31,        // Usage (Y)
	0x15, 0x00,        // Logical Minimum (0)
	0x26, 0xFF, 0x07,  // Logical Maximum (2047)
	0x35, 0x00,        // Physical Minimum (0)
	0x46, 0xFF, 0x00,  // Physical Maximum (255)
	0x75, 0x10,        // Report Size (16)
	0x95, 0x02,        // Report Count (2)
	0x81, 0x02,        // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
	0xC0,              // End Collection
	0x75, 0x08,        // Report Size (8)
	0x95, 0x03,        // Report Count (3)
	0x81, 0x03,        // Input (Cnst,Var,Abs)
	0xC0,              // End Collection
};

static const __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				     unsigned int *rsize)
{
	hid_info(hdev, "fixing up VRC-2 report descriptor\n");
	*rsize = sizeof(vrc2_rdesc_fixed);
	return vrc2_rdesc_fixed;
}

static int vrc2_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	/*
	 * The device gives us 2 separate USB endpoints.
	 * One of those (the one with report descriptor size of 23) is just bogus so ignore it
	 */
	if (hdev->dev_rsize == 23)
		return -ENODEV;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		return ret;
	}

	return 0;
}

static const struct hid_device_id vrc2_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_VRC2, USB_DEVICE_ID_VRC2) },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(hid, vrc2_devices);

static struct hid_driver vrc2_driver = {
	.name = "vrc2",
	.id_table = vrc2_devices,
	.report_fixup = vrc2_report_fixup,
	.probe = vrc2_probe,
};
module_hid_driver(vrc2_driver);

MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
MODULE_DESCRIPTION("HID driver for VRC-2 2-axis Car controller");
MODULE_LICENSE("GPL");
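/*
 * Illustrative sketch (not part of hid-vrc2.c above): decoding one VRC-2
 * input report read from hidraw. With the fixed descriptor the report is
 * 7 bytes: two little-endian 16-bit axes (0..2047) followed by three
 * constant bytes. The /dev/hidraw0 path is an assumption.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t r[7];
	int fd = open("/dev/hidraw0", O_RDONLY);

	if (fd < 0 || read(fd, r, sizeof(r)) != (ssize_t)sizeof(r))
		return 1;

	printf("X=%u Y=%u\n",
	       (unsigned int)(r[0] | (r[1] << 8)),
	       (unsigned int)(r[2] | (r[3] << 8)));
	close(fd);
	return 0;
}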
2 1 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 | // SPDX-License-Identifier: GPL-2.0-only /* * Driver for the Diolan u2c-12 USB-I2C adapter * * Copyright (c) 2010-2011 Ericsson AB * * Derived from: * i2c-tiny-usb.c * Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/i2c.h> #define DRIVER_NAME "i2c-diolan-u2c" #define USB_VENDOR_ID_DIOLAN 0x0abf #define USB_DEVICE_ID_DIOLAN_U2C 0x3370 /* commands via USB, must match command ids in the firmware */ #define CMD_I2C_READ 0x01 #define CMD_I2C_WRITE 0x02 #define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */ #define CMD_I2C_RELEASE_SDA 0x04 #define CMD_I2C_RELEASE_SCL 0x05 #define CMD_I2C_DROP_SDA 0x06 #define CMD_I2C_DROP_SCL 0x07 #define CMD_I2C_READ_SDA 0x08 #define CMD_I2C_READ_SCL 0x09 #define CMD_GET_FW_VERSION 0x0a #define CMD_GET_SERIAL 0x0b #define CMD_I2C_START 0x0c #define CMD_I2C_STOP 0x0d #define CMD_I2C_REPEATED_START 0x0e #define CMD_I2C_PUT_BYTE 0x0f #define CMD_I2C_GET_BYTE 0x10 #define CMD_I2C_PUT_ACK 0x11 #define CMD_I2C_GET_ACK 0x12 #define CMD_I2C_PUT_BYTE_ACK 0x13 #define CMD_I2C_GET_BYTE_ACK 0x14 #define CMD_I2C_SET_SPEED 0x1b #define CMD_I2C_GET_SPEED 0x1c #define CMD_I2C_SET_CLK_SYNC 0x24 #define CMD_I2C_GET_CLK_SYNC 0x25 #define CMD_I2C_SET_CLK_SYNC_TO 0x26 #define CMD_I2C_GET_CLK_SYNC_TO 0x27 #define RESP_OK 0x00 #define RESP_FAILED 0x01 #define RESP_BAD_MEMADDR 0x04 #define RESP_DATA_ERR 0x05 #define RESP_NOT_IMPLEMENTED 0x06 #define RESP_NACK 0x07 
#define RESP_TIMEOUT 0x09 #define U2C_I2C_SPEED_FAST 0 /* 400 kHz */ #define U2C_I2C_SPEED_STD 1 /* 100 kHz */ #define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */ #define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1) #define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10)) #define DIOLAN_USB_TIMEOUT 100 /* in ms */ #define DIOLAN_SYNC_TIMEOUT 20 /* in ms */ #define DIOLAN_OUTBUF_LEN 128 #define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4) #define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */ /* Structure to hold all of our device specific stuff */ struct i2c_diolan_u2c { u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ int ep_in, ep_out; /* Endpoints */ struct usb_device *usb_dev; /* the usb device for this device */ struct usb_interface *interface;/* the interface for this device */ struct i2c_adapter adapter; /* i2c related things */ int olen; /* Output buffer length */ int ocount; /* Number of enqueued messages */ }; static uint frequency = I2C_MAX_STANDARD_MODE_FREQ; /* I2C clock frequency in Hz */ module_param(frequency, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz"); /* usb layer */ /* Send command to device, and get response. */ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) { int ret = 0; int actual; int i; if (!dev->olen || !dev->ocount) return -EINVAL; ret = usb_bulk_msg(dev->usb_dev, usb_sndbulkpipe(dev->usb_dev, dev->ep_out), dev->obuffer, dev->olen, &actual, DIOLAN_USB_TIMEOUT); if (!ret) { for (i = 0; i < dev->ocount; i++) { int tmpret; tmpret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); /* * Stop command processing if a previous command * returned an error. * Note that we still need to retrieve all messages. */ if (ret < 0) continue; ret = tmpret; if (ret == 0 && actual > 0) { switch (dev->ibuffer[actual - 1]) { case RESP_NACK: /* * Return ENXIO if NACK was received as * response to the address phase, * EIO otherwise */ ret = i == 1 ? -ENXIO : -EIO; break; case RESP_TIMEOUT: ret = -ETIMEDOUT; break; case RESP_OK: /* strip off return code */ ret = actual - 1; break; default: ret = -EIO; break; } } } } dev->olen = 0; dev->ocount = 0; return ret; } static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush) { if (flush || dev->olen >= DIOLAN_FLUSH_LEN) return diolan_usb_transfer(dev); return 0; } /* Send command (no data) */ static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush) { dev->obuffer[dev->olen++] = command; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with one byte of data */ static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = data; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with two bytes of data */ static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1, u8 d2, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = d1; dev->obuffer[dev->olen++] = d2; dev->ocount++; return diolan_write_cmd(dev, flush); } /* * Flush input queue. * If we don't do this at startup and the controller has queued up * messages which were not retrieved, it will stop responding * at some point. 
*/ static void diolan_flush_input(struct i2c_diolan_u2c *dev) { int i; for (i = 0; i < 10; i++) { int actual = 0; int ret; ret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, dev->ep_in), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); if (ret < 0 || actual == 0) break; } if (i == 10) dev_err(&dev->interface->dev, "Failed to flush input buffer\n"); } static int diolan_i2c_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_START, false); } static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false); } static int diolan_i2c_stop(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_STOP, true); } static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack, u8 *byte) { int ret; ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true); if (ret > 0) *byte = dev->ibuffer[0]; else if (ret == 0) ret = -EIO; return ret; } static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte) { return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false); } static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true); } /* Enable or disable clock synchronization (stretching) */ static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true); } /* Set clock synchronization timeout in ms */ static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms) { int to_val = ms * 10; return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO, to_val & 0xff, (to_val >> 8) & 0xff, true); } static void diolan_fw_version(struct i2c_diolan_u2c *dev) { int ret; ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true); if (ret >= 2) dev_info(&dev->interface->dev, "Diolan U2C firmware version %u.%u\n", (unsigned int)dev->ibuffer[0], (unsigned int)dev->ibuffer[1]); } static void diolan_get_serial(struct i2c_diolan_u2c *dev) { int ret; u32 serial; ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true); if (ret >= 4) { serial = le32_to_cpu(*(u32 *)dev->ibuffer); dev_info(&dev->interface->dev, "Diolan U2C serial number %u\n", serial); } } static int diolan_init(struct i2c_diolan_u2c *dev) { int speed, ret; if (frequency >= 2 * I2C_MAX_STANDARD_MODE_FREQ) { speed = U2C_I2C_SPEED_FAST; frequency = I2C_MAX_FAST_MODE_FREQ; } else if (frequency >= I2C_MAX_STANDARD_MODE_FREQ || frequency == 0) { speed = U2C_I2C_SPEED_STD; frequency = I2C_MAX_STANDARD_MODE_FREQ; } else { speed = U2C_I2C_SPEED(frequency); if (speed > U2C_I2C_SPEED_2KHZ) speed = U2C_I2C_SPEED_2KHZ; frequency = U2C_I2C_FREQ(speed); } dev_info(&dev->interface->dev, "Diolan U2C at USB bus %03d address %03d speed %d Hz\n", dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency); diolan_flush_input(dev); diolan_fw_version(dev); diolan_get_serial(dev); /* Set I2C speed */ ret = diolan_set_speed(dev, speed); if (ret < 0) return ret; /* Configure I2C clock synchronization */ ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST); if (ret < 0) return ret; if (speed != U2C_I2C_SPEED_FAST) ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT); return ret; } /* i2c layer */ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int i, j; int ret, sret; ret = diolan_i2c_start(dev); if (ret < 0) return ret; for (i = 0; i < num; i++) { pmsg = 
&msgs[i]; if (i) { ret = diolan_i2c_repeated_start(dev); if (ret < 0) goto abort; } ret = diolan_i2c_put_byte_ack(dev, i2c_8bit_addr_from_msg(pmsg)); if (ret < 0) goto abort; if (pmsg->flags & I2C_M_RD) { for (j = 0; j < pmsg->len; j++) { u8 byte; bool ack = j < pmsg->len - 1; /* * Don't send NACK if this is the first byte * of a SMBUS_BLOCK message. */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) ack = true; ret = diolan_i2c_get_byte_ack(dev, ack, &byte); if (ret < 0) goto abort; /* * Adjust count if first received byte is length */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) { if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto abort; } pmsg->len += byte; } pmsg->buf[j] = byte; } } else { for (j = 0; j < pmsg->len; j++) { ret = diolan_i2c_put_byte_ack(dev, pmsg->buf[j]); if (ret < 0) goto abort; } } } ret = num; abort: sret = diolan_i2c_stop(dev); if (sret < 0 && ret >= 0) ret = sret; return ret; } /* * Return list of supported functionality. */ static u32 diolan_usb_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm diolan_usb_algorithm = { .xfer = diolan_usb_xfer, .functionality = diolan_usb_func, }; /* device layer */ static const struct usb_device_id diolan_u2c_table[] = { { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) }, { } }; MODULE_DEVICE_TABLE(usb, diolan_u2c_table); static void diolan_u2c_free(struct i2c_diolan_u2c *dev) { usb_put_dev(dev->usb_dev); kfree(dev); } static int diolan_u2c_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *hostif = interface->cur_altsetting; struct i2c_diolan_u2c *dev; int ret; if (hostif->desc.bInterfaceNumber != 0 || hostif->desc.bNumEndpoints < 2) return -ENODEV; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { ret = -ENOMEM; goto error; } dev->ep_out = hostif->endpoint[0].desc.bEndpointAddress; dev->ep_in = hostif->endpoint[1].desc.bEndpointAddress; dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* setup i2c adapter description */ dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_HWMON; dev->adapter.algo = &diolan_usb_algorithm; i2c_set_adapdata(&dev->adapter, dev); snprintf(dev->adapter.name, sizeof(dev->adapter.name), DRIVER_NAME " at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); dev->adapter.dev.parent = &dev->interface->dev; /* initialize diolan i2c interface */ ret = diolan_init(dev); if (ret < 0) { dev_err(&interface->dev, "failed to initialize adapter\n"); goto error_free; } /* and finally attach to i2c layer */ ret = i2c_add_adapter(&dev->adapter); if (ret < 0) goto error_free; dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n"); return 0; error_free: usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); error: return ret; } static void diolan_u2c_disconnect(struct usb_interface *interface) { struct i2c_diolan_u2c *dev = usb_get_intfdata(interface); i2c_del_adapter(&dev->adapter); usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); dev_dbg(&interface->dev, "disconnected\n"); } static struct usb_driver diolan_u2c_driver = { .name = DRIVER_NAME, .probe = diolan_u2c_probe, .disconnect = diolan_u2c_disconnect, .id_table = diolan_u2c_table, }; module_usb_driver(diolan_u2c_driver); 
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); MODULE_DESCRIPTION(DRIVER_NAME " driver"); MODULE_LICENSE("GPL"); |
// SPDX-License-Identifier: GPL-2.0 /* * Emagic EMI 2|6 usb audio interface firmware loader. * Copyright (C) 2002 * Tapio Laxström (tapio.laxstrom@iptime.fi) * * emi26.c,v 1.13 2002/03/08 13:10:26 tapio Exp */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/ihex.h> #define EMI26_VENDOR_ID 0x086a /* Emagic Soft-und Hardware GmBH */ #define EMI26_PRODUCT_ID 0x0100 /* EMI 2|6 without firmware */ #define EMI26B_PRODUCT_ID 0x0102 /* EMI 2|6 without firmware */ #define ANCHOR_LOAD_INTERNAL 0xA0 /* Vendor specific request code for Anchor Upload/Download (This one is implemented in the core) */ #define ANCHOR_LOAD_EXTERNAL 0xA3 /* This command is not implemented in the core. Requires firmware */ #define ANCHOR_LOAD_FPGA 0xA5 /* This command is not implemented in the core. Requires firmware. Emagic extension */ #define MAX_INTERNAL_ADDRESS 0x1B3F /* This is the highest internal RAM address for the AN2131Q */ #define CPUCS_REG 0x7F92 /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */ #define INTERNAL_RAM(address) (address <= MAX_INTERNAL_ADDRESS) static int emi26_writememory( struct usb_device *dev, int address, const unsigned char *data, int length, __u8 bRequest); static int emi26_set_reset(struct usb_device *dev, unsigned char reset_bit); static int emi26_load_firmware (struct usb_device *dev); static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id); static void emi26_disconnect(struct usb_interface *intf); /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi26_writememory (struct usb_device *dev, int address, const unsigned char *data, int length, __u8 request) { int result; unsigned char *buffer = kmemdup(data, length, GFP_KERNEL); if (!buffer) { dev_err(&dev->dev, "kmalloc(%d) failed.\n", length); return -ENOMEM; } /* Note: usb_control_msg returns negative value on error or length of the * data that was written!
*/ result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300); kfree (buffer); return result; } /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi26_set_reset (struct usb_device *dev, unsigned char reset_bit) { int response; dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit); /* printk(KERN_DEBUG "%s - %d", __func__, reset_bit); */ response = emi26_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0); if (response < 0) { dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit); } return response; } #define FW_LOAD_SIZE 1023 static int emi26_load_firmware (struct usb_device *dev) { const struct firmware *loader_fw = NULL; const struct firmware *bitstream_fw = NULL; const struct firmware *firmware_fw = NULL; const struct ihex_binrec *rec; int err = -ENOMEM; int i; __u32 addr; /* Address to write */ __u8 *buf; buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL); if (!buf) goto wraperr; err = request_ihex_firmware(&loader_fw, "emi26/loader.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&bitstream_fw, "emi26/bitstream.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&firmware_fw, "emi26/firmware.fw", &dev->dev); if (err) { nofw: dev_err(&dev->dev, "%s - request_firmware() failed\n", __func__); goto wraperr; } /* Assert reset (stop the CPU in the EMI) */ err = emi26_set_reset(dev,1); if (err < 0) goto wraperr; rec = (const struct ihex_binrec *)loader_fw->data; /* 1. We need to put the loader for the FPGA into the EZ-USB */ while (rec) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; rec = ihex_next_binrec(rec); } /* De-assert reset (let the CPU run) */ err = emi26_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* 2. We upload the FPGA firmware into the EMI * Note: collect up to 1023 (yes!) bytes and send them with * a single request. This is _much_ faster! */ rec = (const struct ihex_binrec *)bitstream_fw->data; do { i = 0; addr = be32_to_cpu(rec->addr); /* intel hex records are terminated with type 0 element */ while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) { memcpy(buf + i, rec->data, be16_to_cpu(rec->len)); i += be16_to_cpu(rec->len); rec = ihex_next_binrec(rec); } err = emi26_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA); if (err < 0) goto wraperr; } while (rec); /* Assert reset (stop the CPU in the EMI) */ err = emi26_set_reset(dev,1); if (err < 0) goto wraperr; /* 3. We need to put the loader for the firmware into the EZ-USB (again...) */ for (rec = (const struct ihex_binrec *)loader_fw->data; rec; rec = ihex_next_binrec(rec)) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; } msleep(250); /* let device settle */ /* De-assert reset (let the CPU run) */ err = emi26_set_reset(dev,0); if (err < 0) goto wraperr; /* 4. 
We put the part of the firmware that lies in the external RAM into the EZ-USB */ for (rec = (const struct ihex_binrec *)firmware_fw->data; rec; rec = ihex_next_binrec(rec)) { if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_EXTERNAL); if (err < 0) goto wraperr; } } /* Assert reset (stop the CPU in the EMI) */ err = emi26_set_reset(dev,1); if (err < 0) goto wraperr; for (rec = (const struct ihex_binrec *)firmware_fw->data; rec; rec = ihex_next_binrec(rec)) { if (INTERNAL_RAM(be32_to_cpu(rec->addr))) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; } } /* De-assert reset (let the CPU run) */ err = emi26_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* return 1 to fail the driver initialization * and give the real driver a chance to load */ err = 1; wraperr: if (err < 0) dev_err(&dev->dev,"%s - error loading firmware: error = %d\n", __func__, err); release_firmware(loader_fw); release_firmware(bitstream_fw); release_firmware(firmware_fw); kfree(buf); return err; } static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) }, { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, id_table); static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); dev_info(&intf->dev, "%s start\n", __func__); emi26_load_firmware(dev); /* do not return the driver context, let real audio driver do that */ return -EIO; } static void emi26_disconnect(struct usb_interface *intf) { } static struct usb_driver emi26_driver = { .name = "emi26 - firmware loader", .probe = emi26_probe, .disconnect = emi26_disconnect, .id_table = id_table, }; module_usb_driver(emi26_driver); MODULE_AUTHOR("Tapio Laxström"); MODULE_DESCRIPTION("Emagic EMI 2|6 firmware loader."); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("emi26/loader.fw"); MODULE_FIRMWARE("emi26/bitstream.fw"); MODULE_FIRMWARE("emi26/firmware.fw"); /* vi:ai:syntax=c:sw=8:ts=8:tw=80 */
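/*
 * Illustrative user-space counterpart (not part of the driver): the same
 * EZ-USB handshake used above, hold the 8051 in reset via CPUCS, write
 * internal RAM with vendor request 0xA0, then release reset, can be driven
 * with libusb, which is how tools such as fxload traditionally load Cypress
 * firmware. This is a hedged sketch under those assumptions; it only toggles
 * the reset bit and does not upload any firmware records.
 */
#include <stdio.h>
#include <libusb-1.0/libusb.h>

#define EMI26_VENDOR_ID		0x086a
#define EMI26_PRODUCT_ID	0x0100
#define ANCHOR_LOAD_INTERNAL	0xA0	/* vendor request handled by the EZ-USB core */
#define CPUCS_REG		0x7F92	/* bit 0 controls 8051 reset */

static int ezusb_set_reset(libusb_device_handle *h, unsigned char reset_bit)
{
	/* bmRequestType 0x40 = vendor, host-to-device, device recipient */
	return libusb_control_transfer(h, 0x40, ANCHOR_LOAD_INTERNAL,
				       CPUCS_REG, 0, &reset_bit, 1, 300);
}

int main(void)
{
	libusb_device_handle *h;
	int ret;

	if (libusb_init(NULL) < 0)
		return 1;
	h = libusb_open_device_with_vid_pid(NULL, EMI26_VENDOR_ID,
					    EMI26_PRODUCT_ID);
	if (!h) {
		fprintf(stderr, "device not found\n");
		libusb_exit(NULL);
		return 1;
	}
	ret = ezusb_set_reset(h, 1);		/* stop the CPU ... */
	if (ret >= 0)
		ret = ezusb_set_reset(h, 0);	/* ... and let it run again */
	fprintf(stderr, "reset toggle %s\n", ret < 0 ? "failed" : "done");
	libusb_close(h);
	libusb_exit(NULL);
	return ret < 0;
}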
// SPDX-License-Identifier: GPL-2.0-or-later /* * Force feedback support for Logitech Flight System G940 * * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com> */ /* */ #include <linux/input.h> #include <linux/hid.h> #include "hid-lg.h" /* * G940 Theory of Operation (from experimentation) * * There are 63 fields (only 3 of them currently used) * 0 - seems to be command field * 1 - 30 deal with the x axis * 31 - 60 deal with the y axis * * Field 1 is x axis constant force * Field 31 is y axis constant force * * other interesting fields 1,2,3,4 on x axis * (same for 31,32,33,34 on y axis) * * 0 0 127 127 makes the joystick autocenter hard * * 127 0 127 127 makes the joystick loose on the right, * but stops all movement left * * -127 0 -127 -127 makes the joystick loose on the left, * but stops all movement right * * 0 0 -127 -127 makes the joystick rattle very hard * * There are surely other effects here that I don't know enough about */ static int hid_lg3ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); int x, y; /* * Available values in the field should always be 63, but we only use up to * 35. Instead, clear the entire area, however big it is.
*/ memset(report->field[0]->value, 0, sizeof(__s32) * report->field[0]->report_count); switch (effect->type) { case FF_CONSTANT: /* * Already clamped in ff_memless * 0 is center (different then other logitech) */ x = effect->u.ramp.start_level; y = effect->u.ramp.end_level; /* send command byte */ report->field[0]->value[0] = 0x51; /* * Sign backwards from other Force3d pro * which get recast here in two's complement 8 bits */ report->field[0]->value[1] = (unsigned char)(-x); report->field[0]->value[31] = (unsigned char)(-y); hid_hw_request(hid, report, HID_REQ_SET_REPORT); break; } return 0; } static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); /* * Auto Centering probed from device * NOTE: deadman's switch on G940 must be covered * for effects to work */ report->field[0]->value[0] = 0x51; report->field[0]->value[1] = 0x00; report->field[0]->value[2] = 0x00; report->field[0]->value[3] = 0x7F; report->field[0]->value[4] = 0x7F; report->field[0]->value[31] = 0x00; report->field[0]->value[32] = 0x00; report->field[0]->value[33] = 0x7F; report->field[0]->value[34] = 0x7F; hid_hw_request(hid, report, HID_REQ_SET_REPORT); } static const signed short ff3_joystick_ac[] = { FF_CONSTANT, FF_AUTOCENTER, -1 }; int lg3ff_init(struct hid_device *hid) { struct hid_input *hidinput; struct input_dev *dev; const signed short *ff_bits = ff3_joystick_ac; int error; int i; if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; /* Assume single fixed device G940 */ for (i = 0; ff_bits[i] >= 0; i++) set_bit(ff_bits[i], dev->ffbit); error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); if (error) return error; if (test_bit(FF_AUTOCENTER, dev->ffbit)) dev->ff->set_autocenter = hid_lg3ff_set_autocenter; hid_info(hid, "Force feedback for Logitech Flight System G940 by Gary Stein <LordCnidarian@gmail.com>\n"); return 0; } |
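/*
 * Illustrative user-space sketch (not part of the driver): the play and
 * set_autocenter callbacks above are reached through the generic evdev
 * force-feedback interface. Uploading an FF_CONSTANT effect and playing it
 * once looks roughly like this; the event node path is an assumption for
 * illustration, and the level/duration values are arbitrary.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int fd = open("/dev/input/event7", O_RDWR);	/* assumed G940 node */
	struct ff_effect effect;
	struct input_event play;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_CONSTANT;
	effect.id = -1;				/* let the kernel allocate an id */
	effect.u.constant.level = 0x4000;	/* roughly half strength */
	effect.replay.length = 1000;		/* duration in ms */

	/* Upload the effect; ff-memless eventually calls the driver's play hook */
	if (ioctl(fd, EVIOCSFF, &effect) < 0) {
		perror("EVIOCSFF");
		close(fd);
		return 1;
	}

	/* Start playback once by writing an EV_FF event carrying the new id */
	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		perror("write");

	sleep(2);
	close(fd);
	return 0;
}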
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Isovalent */ #include <linux/bpf.h> #include <linux/bpf_mprog.h> #include <linux/netdevice.h> #include <net/tcx.h> int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) { bool created, ingress = attr->attach_type == BPF_TCX_INGRESS; struct net *net = current->nsproxy->net_ns; struct bpf_mprog_entry *entry, *entry_new; struct bpf_prog *replace_prog = NULL; struct net_device *dev; int ret; rtnl_lock(); dev = __dev_get_by_index(net, attr->target_ifindex); if (!dev) { ret = -ENODEV; goto out; } if (attr->attach_flags & BPF_F_REPLACE) { replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, prog->type); if (IS_ERR(replace_prog)) { ret = PTR_ERR(replace_prog); replace_prog = NULL; goto out; } } entry = tcx_entry_fetch_or_create(dev, ingress, &created); if (!entry) { ret = -ENOMEM; goto out; } ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog, attr->attach_flags, attr->relative_fd, attr->expected_revision); if (!ret) { if (entry != entry_new) { tcx_entry_update(dev, entry_new, ingress); tcx_entry_sync(); tcx_skeys_inc(ingress); } bpf_mprog_commit(entry); } else if (created) { tcx_entry_free(entry); } out: if (replace_prog) bpf_prog_put(replace_prog); rtnl_unlock(); return ret; } int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog) { bool ingress = attr->attach_type == BPF_TCX_INGRESS; struct net *net = current->nsproxy->net_ns; struct bpf_mprog_entry *entry, *entry_new; struct net_device *dev; int ret; rtnl_lock(); dev = __dev_get_by_index(net, attr->target_ifindex); if (!dev) { ret = -ENODEV; goto out; } entry = tcx_entry_fetch(dev, ingress); if (!entry) { ret = -ENOENT; goto out; } ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags, attr->relative_fd, attr->expected_revision); if (!ret) { if (!tcx_entry_is_active(entry_new)) entry_new = NULL; tcx_entry_update(dev, entry_new, ingress); tcx_entry_sync(); tcx_skeys_dec(ingress); bpf_mprog_commit(entry); if (!entry_new) tcx_entry_free(entry); } out: rtnl_unlock(); return ret; } void tcx_uninstall(struct net_device *dev, bool ingress) { struct bpf_mprog_entry *entry, *entry_new = NULL; struct bpf_tuple tuple = {}; struct bpf_mprog_fp *fp; struct bpf_mprog_cp
*cp; bool active; entry = tcx_entry_fetch(dev, ingress); if (!entry) return; active = tcx_entry(entry)->miniq_active; if (active) bpf_mprog_clear_all(entry, &entry_new); tcx_entry_update(dev, entry_new, ingress); tcx_entry_sync(); bpf_mprog_foreach_tuple(entry, fp, cp, tuple) { if (tuple.link) tcx_link(tuple.link)->dev = NULL; else bpf_prog_put(tuple.prog); tcx_skeys_dec(ingress); } if (!active) tcx_entry_free(entry); } int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { bool ingress = attr->query.attach_type == BPF_TCX_INGRESS; struct net *net = current->nsproxy->net_ns; struct net_device *dev; int ret; rtnl_lock(); dev = __dev_get_by_index(net, attr->query.target_ifindex); if (!dev) { ret = -ENODEV; goto out; } ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress)); out: rtnl_unlock(); return ret; } static int tcx_link_prog_attach(struct bpf_link *link, u32 flags, u32 id_or_fd, u64 revision) { struct tcx_link *tcx = tcx_link(link); bool created, ingress = tcx->location == BPF_TCX_INGRESS; struct bpf_mprog_entry *entry, *entry_new; struct net_device *dev = tcx->dev; int ret; ASSERT_RTNL(); entry = tcx_entry_fetch_or_create(dev, ingress, &created); if (!entry) return -ENOMEM; ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags, id_or_fd, revision); if (!ret) { if (entry != entry_new) { tcx_entry_update(dev, entry_new, ingress); tcx_entry_sync(); tcx_skeys_inc(ingress); } bpf_mprog_commit(entry); } else if (created) { tcx_entry_free(entry); } return ret; } static void tcx_link_release(struct bpf_link *link) { struct tcx_link *tcx = tcx_link(link); bool ingress = tcx->location == BPF_TCX_INGRESS; struct bpf_mprog_entry *entry, *entry_new; struct net_device *dev; int ret = 0; rtnl_lock(); dev = tcx->dev; if (!dev) goto out; entry = tcx_entry_fetch(dev, ingress); if (!entry) { ret = -ENOENT; goto out; } ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0); if (!ret) { if (!tcx_entry_is_active(entry_new)) entry_new = NULL; tcx_entry_update(dev, entry_new, ingress); tcx_entry_sync(); tcx_skeys_dec(ingress); bpf_mprog_commit(entry); if (!entry_new) tcx_entry_free(entry); tcx->dev = NULL; } out: WARN_ON_ONCE(ret); rtnl_unlock(); } static int tcx_link_update(struct bpf_link *link, struct bpf_prog *nprog, struct bpf_prog *oprog) { struct tcx_link *tcx = tcx_link(link); bool ingress = tcx->location == BPF_TCX_INGRESS; struct bpf_mprog_entry *entry, *entry_new; struct net_device *dev; int ret = 0; rtnl_lock(); dev = tcx->dev; if (!dev) { ret = -ENOLINK; goto out; } if (oprog && link->prog != oprog) { ret = -EPERM; goto out; } oprog = link->prog; if (oprog == nprog) { bpf_prog_put(nprog); goto out; } entry = tcx_entry_fetch(dev, ingress); if (!entry) { ret = -ENOENT; goto out; } ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog, BPF_F_REPLACE | BPF_F_ID, link->prog->aux->id, 0); if (!ret) { WARN_ON_ONCE(entry != entry_new); oprog = xchg(&link->prog, nprog); bpf_prog_put(oprog); bpf_mprog_commit(entry); } out: rtnl_unlock(); return ret; } static void tcx_link_dealloc(struct bpf_link *link) { kfree(tcx_link(link)); } static void tcx_link_fdinfo(const struct bpf_link *link, struct seq_file *seq) { const struct tcx_link *tcx = tcx_link(link); u32 ifindex = 0; rtnl_lock(); if (tcx->dev) ifindex = tcx->dev->ifindex; rtnl_unlock(); seq_printf(seq, "ifindex:\t%u\n", ifindex); seq_printf(seq, "attach_type:\t%u (%s)\n", tcx->location, tcx->location == BPF_TCX_INGRESS ? 
"ingress" : "egress"); } static int tcx_link_fill_info(const struct bpf_link *link, struct bpf_link_info *info) { const struct tcx_link *tcx = tcx_link(link); u32 ifindex = 0; rtnl_lock(); if (tcx->dev) ifindex = tcx->dev->ifindex; rtnl_unlock(); info->tcx.ifindex = ifindex; info->tcx.attach_type = tcx->location; return 0; } static int tcx_link_detach(struct bpf_link *link) { tcx_link_release(link); return 0; } static const struct bpf_link_ops tcx_link_lops = { .release = tcx_link_release, .detach = tcx_link_detach, .dealloc = tcx_link_dealloc, .update_prog = tcx_link_update, .show_fdinfo = tcx_link_fdinfo, .fill_link_info = tcx_link_fill_info, }; static int tcx_link_init(struct tcx_link *tcx, struct bpf_link_primer *link_primer, const union bpf_attr *attr, struct net_device *dev, struct bpf_prog *prog) { bpf_link_init(&tcx->link, BPF_LINK_TYPE_TCX, &tcx_link_lops, prog); tcx->location = attr->link_create.attach_type; tcx->dev = dev; return bpf_link_prime(&tcx->link, link_primer); } int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct net *net = current->nsproxy->net_ns; struct bpf_link_primer link_primer; struct net_device *dev; struct tcx_link *tcx; int ret; rtnl_lock(); dev = __dev_get_by_index(net, attr->link_create.target_ifindex); if (!dev) { ret = -ENODEV; goto out; } tcx = kzalloc(sizeof(*tcx), GFP_USER); if (!tcx) { ret = -ENOMEM; goto out; } ret = tcx_link_init(tcx, &link_primer, attr, dev, prog); if (ret) { kfree(tcx); goto out; } ret = tcx_link_prog_attach(&tcx->link, attr->link_create.flags, attr->link_create.tcx.relative_fd, attr->link_create.tcx.expected_revision); if (ret) { tcx->dev = NULL; bpf_link_cleanup(&link_primer); goto out; } ret = bpf_link_settle(&link_primer); out: rtnl_unlock(); return ret; } |
/* * Copyright (C) 2014 Red Hat * Copyright (C) 2014 Intel Corp. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rob Clark <robdclark@gmail.com> * Daniel Vetter <daniel.vetter@ffwll.ch> */ #ifndef DRM_ATOMIC_H_ #define DRM_ATOMIC_H_ #include <drm/drm_crtc.h> #include <drm/drm_util.h> /** * struct drm_crtc_commit - track modeset commits on a CRTC * * This structure is used to track pending modeset changes and atomic commit on * a per-CRTC basis. Since updating the list should never block, this structure * is reference counted to allow waiters to safely wait on an event to complete, * without holding any locks. * * It has 3 different events in total to allow a fine-grained synchronization * between outstanding updates:: * * atomic commit thread hardware * * write new state into hardware ----> ... * signal hw_done * switch to new state on next * ... v/hblank * * wait for buffers to show up ... * * ...
send completion irq * irq handler signals flip_done * cleanup old buffers * * signal cleanup_done * * wait for flip_done <---- * clean up atomic state * * The important bit to know is that &cleanup_done is the terminal event, but the * ordering between &flip_done and &hw_done is entirely up to the specific driver * and modeset state change. * * For an implementation of how to use this look at * drm_atomic_helper_setup_commit() from the atomic helper library. * * See also drm_crtc_commit_wait(). */ struct drm_crtc_commit { /** * @crtc: * * DRM CRTC for this commit. */ struct drm_crtc *crtc; /** * @ref: * * Reference count for this structure. Needed to allow blocking on * completions without the risk of the completion disappearing * meanwhile. */ struct kref ref; /** * @flip_done: * * Will be signaled when the hardware has flipped to the new set of * buffers. Signals at the same time as when the drm event for this * commit is sent to userspace, or when an out-fence is singalled. Note * that for most hardware, in most cases this happens after @hw_done is * signalled. * * Completion of this stage is signalled implicitly by calling * drm_crtc_send_vblank_event() on &drm_crtc_state.event. */ struct completion flip_done; /** * @hw_done: * * Will be signalled when all hw register changes for this commit have * been written out. Especially when disabling a pipe this can be much * later than @flip_done, since that can signal already when the * screen goes black, whereas to fully shut down a pipe more register * I/O is required. * * Note that this does not need to include separately reference-counted * resources like backing storage buffer pinning, or runtime pm * management. * * Drivers should call drm_atomic_helper_commit_hw_done() to signal * completion of this stage. */ struct completion hw_done; /** * @cleanup_done: * * Will be signalled after old buffers have been cleaned up by calling * drm_atomic_helper_cleanup_planes(). Since this can only happen after * a vblank wait completed it might be a bit later. This completion is * useful to throttle updates and avoid hardware updates getting ahead * of the buffer cleanup too much. * * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal * completion of this stage. */ struct completion cleanup_done; /** * @commit_entry: * * Entry on the per-CRTC &drm_crtc.commit_list. Protected by * $drm_crtc.commit_lock. */ struct list_head commit_entry; /** * @event: * * &drm_pending_vblank_event pointer to clean up private events. */ struct drm_pending_vblank_event *event; /** * @abort_completion: * * A flag that's set after drm_atomic_helper_setup_commit() takes a * second reference for the completion of $drm_crtc_state.event. It's * used by the free code to remove the second reference if commit fails. */ bool abort_completion; }; struct __drm_planes_state { struct drm_plane *ptr; struct drm_plane_state *state, *old_state, *new_state; }; struct __drm_crtcs_state { struct drm_crtc *ptr; struct drm_crtc_state *state, *old_state, *new_state; /** * @commit: * * A reference to the CRTC commit object that is kept for use by * drm_atomic_helper_wait_for_flip_done() after * drm_atomic_helper_commit_hw_done() is called. This ensures that a * concurrent commit won't free a commit object that is still in use. 
*/ struct drm_crtc_commit *commit; s32 __user *out_fence_ptr; u64 last_vblank_count; }; struct __drm_connnectors_state { struct drm_connector *ptr; struct drm_connector_state *state, *old_state, *new_state; /** * @out_fence_ptr: * * User-provided pointer which the kernel uses to return a sync_file * file descriptor. Used by writeback connectors to signal completion of * the writeback. */ s32 __user *out_fence_ptr; }; struct drm_private_obj; struct drm_private_state; /** * struct drm_private_state_funcs - atomic state functions for private objects * * These hooks are used by atomic helpers to create, swap and destroy states of * private objects. The structure itself is used as a vtable to identify the * associated private object type. Each private object type that needs to be * added to the atomic states is expected to have an implementation of these * hooks and pass a pointer to its drm_private_state_funcs struct to * drm_atomic_get_private_obj_state(). */ struct drm_private_state_funcs { /** * @atomic_duplicate_state: * * Duplicate the current state of the private object and return it. It * is an error to call this before obj->state has been initialized. * * RETURNS: * * Duplicated atomic state or NULL when obj->state is not * initialized or allocation failed. */ struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj); /** * @atomic_destroy_state: * * Frees the private object state created with @atomic_duplicate_state. */ void (*atomic_destroy_state)(struct drm_private_obj *obj, struct drm_private_state *state); /** * @atomic_print_state: * * If driver subclasses &struct drm_private_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_private_obj_print_state() * instead. */ void (*atomic_print_state)(struct drm_printer *p, const struct drm_private_state *state); }; /** * struct drm_private_obj - base struct for driver private atomic object * * A driver private object is initialized by calling * drm_atomic_private_obj_init() and cleaned up by calling * drm_atomic_private_obj_fini(). * * Currently only tracks the state update functions and the opaque driver * private state itself, but in the future might also track which * &drm_modeset_lock is required to duplicate and update this object's state. * * All private objects must be initialized before the DRM device they are * attached to is registered to the DRM subsystem (call to drm_dev_register()) * and should stay around until this DRM device is unregistered (call to * drm_dev_unregister()). In other words, private objects lifetime is tied * to the DRM device lifetime. This implies that: * * 1/ all calls to drm_atomic_private_obj_init() must be done before calling * drm_dev_register() * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling * drm_dev_unregister() * * If that private object is used to store a state shared by multiple * CRTCs, proper care must be taken to ensure that non-blocking commits are * properly ordered to avoid a use-after-free issue. * * Indeed, assuming a sequence of two non-blocking &drm_atomic_commit on two * different &drm_crtc using different &drm_plane and &drm_connector, so with no * resources shared, there's no guarantee on which commit is going to happen * first. However, the second &drm_atomic_commit will consider the first * &drm_private_obj its old state, and will be in charge of freeing it whenever * the second &drm_atomic_commit is done. 
* * If the first &drm_atomic_commit happens after it, it will consider its * &drm_private_obj the new state and will be likely to access it, resulting in * an access to a freed memory region. Drivers should store (and get a reference * to) the &drm_crtc_commit structure in our private state in * &drm_mode_config_helper_funcs.atomic_commit_setup, and then wait for that * commit to complete as the first step of * &drm_mode_config_helper_funcs.atomic_commit_tail, similar to * drm_atomic_helper_wait_for_dependencies(). */ struct drm_private_obj { /** * @head: List entry used to attach a private object to a &drm_device * (queued to &drm_mode_config.privobj_list). */ struct list_head head; /** * @lock: Modeset lock to protect the state object. */ struct drm_modeset_lock lock; /** * @state: Current atomic state for this driver private object. */ struct drm_private_state *state; /** * @funcs: * * Functions to manipulate the state of this driver private object, see * &drm_private_state_funcs. */ const struct drm_private_state_funcs *funcs; }; /** * drm_for_each_privobj() - private object iterator * * @privobj: pointer to the current private object. Updated after each * iteration * @dev: the DRM device we want get private objects from * * Allows one to iterate over all private objects attached to @dev */ #define drm_for_each_privobj(privobj, dev) \ list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head) /** * struct drm_private_state - base struct for driver private object state * * Currently only contains a backpointer to the overall atomic update, * and the relevant private object but in the future also might hold * synchronization information similar to e.g. &drm_crtc.commit. */ struct drm_private_state { /** * @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; /** * @obj: backpointer to the private object */ struct drm_private_obj *obj; }; struct __drm_private_objs_state { struct drm_private_obj *ptr; struct drm_private_state *state, *old_state, *new_state; }; /** * struct drm_atomic_state - Atomic commit structure * * This structure is the kernel counterpart of @drm_mode_atomic and represents * an atomic commit that transitions from an old to a new display state. It * contains all the objects affected by the atomic commit and both the new * state structures and pointers to the old state structures for * these. * * States are added to an atomic update by calling drm_atomic_get_crtc_state(), * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for * private state structures, drm_atomic_get_private_obj_state(). * * NOTE: struct drm_atomic_state first started as a single collection of * entities state pointers (drm_plane_state, drm_crtc_state, etc.). * * At atomic_check time, you could get the state about to be committed * from drm_atomic_state, and the one currently running from the * entities state pointer (drm_crtc.state, for example). After the call * to drm_atomic_helper_swap_state(), the entities state pointer would * contain the state previously checked, and the drm_atomic_state * structure the old state. * * Over time, and in order to avoid confusion, drm_atomic_state has * grown to have both the old state (ie, the state we replace) and the * new state (ie, the state we want to apply). Those names are stable * during the commit process, which makes it easier to reason about. * * You can still find some traces of that evolution through some hooks * or callbacks taking a drm_atomic_state parameter called names like * "old_state". 
This doesn't necessarily mean that the previous * drm_atomic_state is passed, but rather that this used to be the state * collection we were replacing after drm_atomic_helper_swap_state(), * but the variable name was never updated. * * Some atomic operations implementations followed a similar process. We * first started to pass the entity state only. However, it was pretty * cumbersome for drivers, and especially CRTCs, to retrieve the states * of other components. Thus, we switched to passing the whole * drm_atomic_state as a parameter to those operations. Similarly, the * transition isn't complete yet, and one might still find atomic * operations taking a drm_atomic_state pointer, or a component state * pointer. The former is the preferred form. */ struct drm_atomic_state { /** * @ref: * * Count of all references to this update (will not be freed until zero). */ struct kref ref; /** * @dev: Parent DRM Device. */ struct drm_device *dev; /** * @allow_modeset: * * Allow full modeset. This is used by the ATOMIC IOCTL handler to * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should * generally not consult this flag, but instead look at the output of * drm_atomic_crtc_needs_modeset(). The detailed rules are: * * - Drivers must not consult @allow_modeset in the atomic commit path. * Use drm_atomic_crtc_needs_modeset() instead. * * - Drivers must consult @allow_modeset before adding unrelated struct * drm_crtc_state to this commit by calling * drm_atomic_get_crtc_state(). See also the warning in the * documentation for that function. * * - Drivers must never change this flag, it is under the exclusive * control of userspace. * * - Drivers may consult @allow_modeset in the atomic check path, if * they have the choice between an optimal hardware configuration * which requires a modeset, and a less optimal configuration which * can be committed without a modeset. An example would be suboptimal * scanout FIFO allocation resulting in increased idle power * consumption. This allows userspace to avoid flickering and delays * for the normal composition loop at reasonable cost. */ bool allow_modeset : 1; /** * @legacy_cursor_update: * * Hint to enforce legacy cursor IOCTL semantics. * * WARNING: This is thoroughly broken and pretty much impossible to * implement correctly. Drivers must ignore this and should instead * implement &drm_plane_helper_funcs.atomic_async_check and * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this * flag are not allowed. */ bool legacy_cursor_update : 1; /** * @async_update: hint for asynchronous plane update */ bool async_update : 1; /** * @duplicated: * * Indicates whether or not this atomic state was duplicated using * drm_atomic_helper_duplicate_state(). Drivers and atomic helpers * should use this to fixup normal inconsistencies in duplicated * states. */ bool duplicated : 1; /** * @planes: * * Pointer to array of @drm_plane and @drm_plane_state part of this * update. */ struct __drm_planes_state *planes; /** * @crtcs: * * Pointer to array of @drm_crtc and @drm_crtc_state part of this * update. */ struct __drm_crtcs_state *crtcs; /** * @num_connector: size of the @connectors array */ int num_connector; /** * @connectors: * * Pointer to array of @drm_connector and @drm_connector_state part of * this update. 
*/ struct __drm_connnectors_state *connectors; /** * @num_private_objs: size of the @private_objs array */ int num_private_objs; /** * @private_objs: * * Pointer to array of @drm_private_obj and @drm_private_obj_state part * of this update. */ struct __drm_private_objs_state *private_objs; /** * @acquire_ctx: acquire context for this atomic modeset state update */ struct drm_modeset_acquire_ctx *acquire_ctx; /** * @fake_commit: * * Used for signaling unbound planes/connectors. * When a connector or plane is not bound to any CRTC, it's still important * to preserve linearity to prevent the atomic states from being freed too early. * * This commit (if set) is not bound to any CRTC, but will be completed when * drm_atomic_helper_commit_hw_done() is called. */ struct drm_crtc_commit *fake_commit; /** * @commit_work: * * Work item which can be used by the driver or helpers to execute the * commit without blocking. */ struct work_struct commit_work; }; void __drm_crtc_commit_free(struct kref *kref); /** * drm_crtc_commit_get - acquire a reference to the CRTC commit * @commit: CRTC commit * * Increases the reference of @commit. * * Returns: * The pointer to @commit, with reference increased. */ static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit) { kref_get(&commit->ref); return commit; } /** * drm_crtc_commit_put - release a reference to the CRTC commmit * @commit: CRTC commit * * This releases a reference to @commit which is freed after removing the * final reference. No locking required and callable from any context. */ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) { kref_put(&commit->ref, __drm_crtc_commit_free); } int drm_crtc_commit_wait(struct drm_crtc_commit *commit); struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); /** * drm_atomic_state_get - acquire a reference to the atomic state * @state: The atomic state * * Returns a new reference to the @state */ static inline struct drm_atomic_state * drm_atomic_state_get(struct drm_atomic_state *state) { kref_get(&state->ref); return state; } void __drm_atomic_state_free(struct kref *ref); /** * drm_atomic_state_put - release a reference to the atomic state * @state: The atomic state * * This releases a reference to @state which is freed after removing the * final reference. No locking required and callable from any context. 
*/ static inline void drm_atomic_state_put(struct drm_atomic_state *state) { kref_put(&state->ref, __drm_atomic_state_free); } int __must_check drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_state_default_clear(struct drm_atomic_state *state); void drm_atomic_state_default_release(struct drm_atomic_state *state); struct drm_crtc_state * __must_check drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc); struct drm_plane_state * __must_check drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane); struct drm_connector_state * __must_check drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector); void drm_atomic_private_obj_init(struct drm_device *dev, struct drm_private_obj *obj, struct drm_private_state *state, const struct drm_private_state_funcs *funcs); void drm_atomic_private_obj_fini(struct drm_private_obj *obj); struct drm_private_state * __must_check drm_atomic_get_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj); struct drm_private_state * drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state, struct drm_private_obj *obj); struct drm_private_state * drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state, struct drm_private_obj *obj); struct drm_connector * drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder); struct drm_connector * drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder); struct drm_connector * drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); struct drm_crtc * drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, struct drm_encoder *encoder); struct drm_crtc * drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state, struct drm_encoder *encoder); /** * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists * @state: global atomic state object * @crtc: CRTC to grab * * This function returns the CRTC state for the given CRTC, or NULL * if the CRTC is not part of the global atomic state. * * This function is deprecated, @drm_atomic_get_old_crtc_state or * @drm_atomic_get_new_crtc_state should be used instead. */ static inline struct drm_crtc_state * drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state, struct drm_crtc *crtc) { return state->crtcs[drm_crtc_index(crtc)].state; } /** * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists * @state: global atomic state object * @crtc: CRTC to grab * * This function returns the old CRTC state for the given CRTC, or * NULL if the CRTC is not part of the global atomic state. */ static inline struct drm_crtc_state * drm_atomic_get_old_crtc_state(const struct drm_atomic_state *state, struct drm_crtc *crtc) { return state->crtcs[drm_crtc_index(crtc)].old_state; } /** * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists * @state: global atomic state object * @crtc: CRTC to grab * * This function returns the new CRTC state for the given CRTC, or * NULL if the CRTC is not part of the global atomic state. 
*/ static inline struct drm_crtc_state * drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state, struct drm_crtc *crtc) { return state->crtcs[drm_crtc_index(crtc)].new_state; } /** * drm_atomic_get_existing_plane_state - get plane state, if it exists * @state: global atomic state object * @plane: plane to grab * * This function returns the plane state for the given plane, or NULL * if the plane is not part of the global atomic state. * * This function is deprecated, @drm_atomic_get_old_plane_state or * @drm_atomic_get_new_plane_state should be used instead. */ static inline struct drm_plane_state * drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state, struct drm_plane *plane) { return state->planes[drm_plane_index(plane)].state; } /** * drm_atomic_get_old_plane_state - get plane state, if it exists * @state: global atomic state object * @plane: plane to grab * * This function returns the old plane state for the given plane, or * NULL if the plane is not part of the global atomic state. */ static inline struct drm_plane_state * drm_atomic_get_old_plane_state(const struct drm_atomic_state *state, struct drm_plane *plane) { return state->planes[drm_plane_index(plane)].old_state; } /** * drm_atomic_get_new_plane_state - get plane state, if it exists * @state: global atomic state object * @plane: plane to grab * * This function returns the new plane state for the given plane, or * NULL if the plane is not part of the global atomic state. */ static inline struct drm_plane_state * drm_atomic_get_new_plane_state(const struct drm_atomic_state *state, struct drm_plane *plane) { return state->planes[drm_plane_index(plane)].new_state; } /** * drm_atomic_get_existing_connector_state - get connector state, if it exists * @state: global atomic state object * @connector: connector to grab * * This function returns the connector state for the given connector, * or NULL if the connector is not part of the global atomic state. * * This function is deprecated, @drm_atomic_get_old_connector_state or * @drm_atomic_get_new_connector_state should be used instead. */ static inline struct drm_connector_state * drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state, struct drm_connector *connector) { int index = drm_connector_index(connector); if (index >= state->num_connector) return NULL; return state->connectors[index].state; } /** * drm_atomic_get_old_connector_state - get connector state, if it exists * @state: global atomic state object * @connector: connector to grab * * This function returns the old connector state for the given connector, * or NULL if the connector is not part of the global atomic state. */ static inline struct drm_connector_state * drm_atomic_get_old_connector_state(const struct drm_atomic_state *state, struct drm_connector *connector) { int index = drm_connector_index(connector); if (index >= state->num_connector) return NULL; return state->connectors[index].old_state; } /** * drm_atomic_get_new_connector_state - get connector state, if it exists * @state: global atomic state object * @connector: connector to grab * * This function returns the new connector state for the given connector, * or NULL if the connector is not part of the global atomic state. 
*/ static inline struct drm_connector_state * drm_atomic_get_new_connector_state(const struct drm_atomic_state *state, struct drm_connector *connector) { int index = drm_connector_index(connector); if (index >= state->num_connector) return NULL; return state->connectors[index].new_state; } /** * __drm_atomic_get_current_plane_state - get current plane state * @state: global atomic state object * @plane: plane to grab * * This function returns the plane state for the given plane, either from * @state, or if the plane isn't part of the atomic state update, from @plane. * This is useful in atomic check callbacks, when drivers need to peek at, but * not change, state of other planes, since it avoids threading an error code * back up the call chain. * * WARNING: * * Note that this function is in general unsafe since it doesn't check for the * required locking for access state structures. Drivers must ensure that it is * safe to access the returned state structure through other means. One common * example is when planes are fixed to a single CRTC, and the driver knows that * the CRTC lock is held already. In that case holding the CRTC lock gives a * read-lock on all planes connected to that CRTC. But if planes can be * reassigned things get more tricky. In that case it's better to use * drm_atomic_get_plane_state and wire up full error handling. * * Returns: * * Read-only pointer to the current plane state. */ static inline const struct drm_plane_state * __drm_atomic_get_current_plane_state(const struct drm_atomic_state *state, struct drm_plane *plane) { if (state->planes[drm_plane_index(plane)].state) return state->planes[drm_plane_index(plane)].state; return plane->state; } int __must_check drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, struct drm_encoder *encoder); int __must_check drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_crtc *crtc); int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); void drm_state_dump(struct drm_device *dev, struct drm_printer *p); /** * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update * @__state: &struct drm_atomic_state pointer * @connector: &struct drm_connector iteration cursor * @old_connector_state: &struct drm_connector_state iteration cursor for the * old state * @new_connector_state: &struct drm_connector_state iteration cursor for the * new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all connectors in an atomic update, tracking both old and * new state. This is useful in places where the state delta needs to be * considered, for example in atomic check functions. 
*/ #define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector; \ (__i)++) \ for_each_if ((__state)->connectors[__i].ptr && \ ((connector) = (__state)->connectors[__i].ptr, \ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ (old_connector_state) = (__state)->connectors[__i].old_state, \ (new_connector_state) = (__state)->connectors[__i].new_state, 1)) /** * for_each_old_connector_in_state - iterate over all connectors in an atomic update * @__state: &struct drm_atomic_state pointer * @connector: &struct drm_connector iteration cursor * @old_connector_state: &struct drm_connector_state iteration cursor for the * old state * @__i: int iteration cursor, for macro-internal use * * This iterates over all connectors in an atomic update, tracking only the old * state. This is useful in disable functions, where we need the old state the * hardware is still in. */ #define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector; \ (__i)++) \ for_each_if ((__state)->connectors[__i].ptr && \ ((connector) = (__state)->connectors[__i].ptr, \ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ (old_connector_state) = (__state)->connectors[__i].old_state, 1)) /** * for_each_new_connector_in_state - iterate over all connectors in an atomic update * @__state: &struct drm_atomic_state pointer * @connector: &struct drm_connector iteration cursor * @new_connector_state: &struct drm_connector_state iteration cursor for the * new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all connectors in an atomic update, tracking only the new * state. This is useful in enable functions, where we need the new state the * hardware should be in when the atomic commit operation has completed. */ #define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector; \ (__i)++) \ for_each_if ((__state)->connectors[__i].ptr && \ ((connector) = (__state)->connectors[__i].ptr, \ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \ (new_connector_state) = (__state)->connectors[__i].new_state, \ (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1)) /** * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update * @__state: &struct drm_atomic_state pointer * @crtc: &struct drm_crtc iteration cursor * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all CRTCs in an atomic update, tracking both old and * new state. This is useful in places where the state delta needs to be * considered, for example in atomic check functions. 
*/ #define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->dev->mode_config.num_crtc; \ (__i)++) \ for_each_if ((__state)->crtcs[__i].ptr && \ ((crtc) = (__state)->crtcs[__i].ptr, \ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ (old_crtc_state) = (__state)->crtcs[__i].old_state, \ (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \ (new_crtc_state) = (__state)->crtcs[__i].new_state, \ (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) /** * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update * @__state: &struct drm_atomic_state pointer * @crtc: &struct drm_crtc iteration cursor * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state * @__i: int iteration cursor, for macro-internal use * * This iterates over all CRTCs in an atomic update, tracking only the old * state. This is useful in disable functions, where we need the old state the * hardware is still in. */ #define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->dev->mode_config.num_crtc; \ (__i)++) \ for_each_if ((__state)->crtcs[__i].ptr && \ ((crtc) = (__state)->crtcs[__i].ptr, \ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ (old_crtc_state) = (__state)->crtcs[__i].old_state, 1)) /** * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update * @__state: &struct drm_atomic_state pointer * @crtc: &struct drm_crtc iteration cursor * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all CRTCs in an atomic update, tracking only the new * state. This is useful in enable functions, where we need the new state the * hardware should be in when the atomic commit operation has completed. */ #define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->dev->mode_config.num_crtc; \ (__i)++) \ for_each_if ((__state)->crtcs[__i].ptr && \ ((crtc) = (__state)->crtcs[__i].ptr, \ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ (new_crtc_state) = (__state)->crtcs[__i].new_state, \ (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) /** * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor * @old_plane_state: &struct drm_plane_state iteration cursor for the old state * @new_plane_state: &struct drm_plane_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all planes in an atomic update, tracking both old and * new state. This is useful in places where the state delta needs to be * considered, for example in atomic check functions. 
*/ #define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->dev->mode_config.num_total_plane; \ (__i)++) \ for_each_if ((__state)->planes[__i].ptr && \ ((plane) = (__state)->planes[__i].ptr, \ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ (old_plane_state) = (__state)->planes[__i].old_state,\ (new_plane_state) = (__state)->planes[__i].new_state, 1)) /** * for_each_oldnew_plane_in_state_reverse - iterate over all planes in an atomic * update in reverse order * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor * @old_plane_state: &struct drm_plane_state iteration cursor for the old state * @new_plane_state: &struct drm_plane_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all planes in an atomic update in reverse order, * tracking both old and new state. This is useful in places where the * state delta needs to be considered, for example in atomic check functions. */ #define for_each_oldnew_plane_in_state_reverse(__state, plane, old_plane_state, new_plane_state, __i) \ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ (__i) >= 0; \ (__i)--) \ for_each_if ((__state)->planes[__i].ptr && \ ((plane) = (__state)->planes[__i].ptr, \ (old_plane_state) = (__state)->planes[__i].old_state,\ (new_plane_state) = (__state)->planes[__i].new_state, 1)) /** * for_each_new_plane_in_state_reverse - other than only tracking new state, * it's the same as for_each_oldnew_plane_in_state_reverse * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor * @new_plane_state: &struct drm_plane_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use */ #define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \ (__i) >= 0; \ (__i)--) \ for_each_if ((__state)->planes[__i].ptr && \ ((plane) = (__state)->planes[__i].ptr, \ (new_plane_state) = (__state)->planes[__i].new_state, 1)) /** * for_each_old_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor * @old_plane_state: &struct drm_plane_state iteration cursor for the old state * @__i: int iteration cursor, for macro-internal use * * This iterates over all planes in an atomic update, tracking only the old * state. This is useful in disable functions, where we need the old state the * hardware is still in. */ #define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->dev->mode_config.num_total_plane; \ (__i)++) \ for_each_if ((__state)->planes[__i].ptr && \ ((plane) = (__state)->planes[__i].ptr, \ (old_plane_state) = (__state)->planes[__i].old_state, 1)) /** * for_each_new_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer * @plane: &struct drm_plane iteration cursor * @new_plane_state: &struct drm_plane_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all planes in an atomic update, tracking only the new * state. This is useful in enable functions, where we need the new state the * hardware should be in when the atomic commit operation has completed. 
*/ #define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->dev->mode_config.num_total_plane; \ (__i)++) \ for_each_if ((__state)->planes[__i].ptr && \ ((plane) = (__state)->planes[__i].ptr, \ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \ (new_plane_state) = (__state)->planes[__i].new_state, \ (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1)) /** * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer * @obj: &struct drm_private_obj iteration cursor * @old_obj_state: &struct drm_private_state iteration cursor for the old state * @new_obj_state: &struct drm_private_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all private objects in an atomic update, tracking both * old and new state. This is useful in places where the state delta needs * to be considered, for example in atomic check functions. */ #define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ (old_obj_state) = (__state)->private_objs[__i].old_state, \ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ (__i)++) /** * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer * @obj: &struct drm_private_obj iteration cursor * @old_obj_state: &struct drm_private_state iteration cursor for the old state * @__i: int iteration cursor, for macro-internal use * * This iterates over all private objects in an atomic update, tracking only * the old state. This is useful in disable functions, where we need the old * state the hardware is still in. */ #define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ (__i)++) /** * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update * @__state: &struct drm_atomic_state pointer * @obj: &struct drm_private_obj iteration cursor * @new_obj_state: &struct drm_private_state iteration cursor for the new state * @__i: int iteration cursor, for macro-internal use * * This iterates over all private objects in an atomic update, tracking only * the new state. This is useful in enable functions, where we need the new state the * hardware should be in when the atomic commit operation has completed. */ #define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ (void)(obj) /* Only to avoid unused-but-set-variable warning */, \ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ (__i)++) /** * drm_atomic_crtc_needs_modeset - compute combined modeset need * @state: &drm_crtc_state for the CRTC * * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track * whether the state CRTC changed enough to need a full modeset cycle: * mode_changed, active_changed and connectors_changed. This helper simply * combines these three to compute the overall need for a modeset for @state. 
* * The atomic helper code sets these booleans, but drivers can and should * change them appropriately to accurately represent whether a modeset is * really needed. In general, drivers should avoid full modesets whenever * possible. * * For example if the CRTC mode has changed, and the hardware is able to enact * the requested mode change without going through a full modeset, the driver * should clear mode_changed in its &drm_mode_config_funcs.atomic_check * implementation. */ static inline bool drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) { return state->mode_changed || state->active_changed || state->connectors_changed; } /** * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active * @state: &drm_crtc_state for the CRTC * * When in self refresh mode, the crtc_state->active value will be false, since * the CRTC is off. However in some cases we're interested in whether the CRTC * is active, or effectively active (ie: it's connected to an active display). * In these cases, use this function instead of just checking active. */ static inline bool drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state) { return state->active || state->self_refresh_active; } /** * struct drm_bus_cfg - bus configuration * * This structure stores the configuration of a physical bus between two * components in an output pipeline, usually between two bridges, an encoder * and a bridge, or a bridge and a connector. * * The bus configuration is stored in &drm_bridge_state separately for the * input and output buses, as seen from the point of view of each bridge. The * bus configuration of a bridge output is usually identical to the * configuration of the next bridge's input, but may differ if the signals are * modified between the two bridges, for instance by an inverter on the board. * The input and output configurations of a bridge may differ if the bridge * modifies the signals internally, for instance by performing format * conversion, or modifying signals polarities. */ struct drm_bus_cfg { /** * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format) * * This field should not be directly modified by drivers * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus * format negotiation). */ u32 format; /** * @flags: DRM_BUS_* flags used on this bus */ u32 flags; }; /** * struct drm_bridge_state - Atomic bridge state object */ struct drm_bridge_state { /** * @base: inherit from &drm_private_state */ struct drm_private_state base; /** * @bridge: the bridge this state refers to */ struct drm_bridge *bridge; /** * @input_bus_cfg: input bus configuration */ struct drm_bus_cfg input_bus_cfg; /** * @output_bus_cfg: output bus configuration */ struct drm_bus_cfg output_bus_cfg; }; static inline struct drm_bridge_state * drm_priv_to_bridge_state(struct drm_private_state *priv) { return container_of(priv, struct drm_bridge_state, base); } struct drm_bridge_state * drm_atomic_get_bridge_state(struct drm_atomic_state *state, struct drm_bridge *bridge); struct drm_bridge_state * drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state, struct drm_bridge *bridge); struct drm_bridge_state * drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state, struct drm_bridge *bridge); #endif /* DRM_ATOMIC_H_ */ |
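/*
 * Illustrative only: a minimal, hypothetical atomic_check sketch showing how
 * the iterator macros and drm_atomic_crtc_needs_modeset() declared above are
 * typically combined by a driver.  The foo_atomic_check() name is an
 * assumption for this sketch and is not part of the header.
 */
static int foo_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i, ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		/* Only CRTCs that really need a full modeset are interesting here. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		/*
		 * Pull every plane on this CRTC into the update so that plane
		 * state can be validated against the new CRTC state.
		 */
		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}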
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ioctl.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/compat.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/security.h> #include <linux/export.h> #include <linux/uaccess.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/falloc.h> #include <linux/sched/signal.h> #include <linux/fiemap.h> #include <linux/mount.h> #include <linux/fscrypt.h> #include <linux/fileattr.h> #include "internal.h" #include <asm/ioctls.h> /* So that the fiemap access checks can't overflow on 32 bit machines. */ #define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent)) /** * vfs_ioctl - call filesystem specific ioctl methods * @filp: open file to invoke ioctl method on * @cmd: ioctl command to execute * @arg: command-specific argument for ioctl * * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise * returns -ENOTTY. * * Returns 0 on success, -errno on error. */ int vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error = -ENOTTY; if (!filp->f_op->unlocked_ioctl) goto out; error = filp->f_op->unlocked_ioctl(filp, cmd, arg); if (error == -ENOIOCTLCMD) error = -ENOTTY; out: return error; } EXPORT_SYMBOL(vfs_ioctl); static int ioctl_fibmap(struct file *filp, int __user *p) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; int error, ur_block; sector_t block; if (!capable(CAP_SYS_RAWIO)) return -EPERM; error = get_user(ur_block, p); if (error) return error; if (ur_block < 0) return -EINVAL; block = ur_block; error = bmap(inode, &block); if (block > INT_MAX) { error = -ERANGE; pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n", current->comm, task_pid_nr(current), sb->s_id, filp); } if (error) ur_block = 0; else ur_block = block; if (put_user(ur_block, p)) error = -EFAULT; return error; } /** * fiemap_fill_next_extent - Fiemap helper function * @fieinfo: Fiemap context passed into ->fiemap * @logical: Extent logical start offset, in bytes * @phys: Extent physical start offset, in bytes * @len: Extent length, in bytes * @flags: FIEMAP_EXTENT flags that describe this extent * * Called from file system ->fiemap callback. Will populate extent * info as passed in via arguments and copy to user memory. On * success, extent count on fieinfo is incremented. * * Returns 0 on success, -errno on error, 1 if this was the last * extent that will fit in user array.
*/ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical, u64 phys, u64 len, u32 flags) { struct fiemap_extent extent; struct fiemap_extent __user *dest = fieinfo->fi_extents_start; /* only count the extents */ if (fieinfo->fi_extents_max == 0) { fieinfo->fi_extents_mapped++; return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; } if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max) return 1; #define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC) #define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED) #define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE) if (flags & SET_UNKNOWN_FLAGS) flags |= FIEMAP_EXTENT_UNKNOWN; if (flags & SET_NO_UNMOUNTED_IO_FLAGS) flags |= FIEMAP_EXTENT_ENCODED; if (flags & SET_NOT_ALIGNED_FLAGS) flags |= FIEMAP_EXTENT_NOT_ALIGNED; memset(&extent, 0, sizeof(extent)); extent.fe_logical = logical; extent.fe_physical = phys; extent.fe_length = len; extent.fe_flags = flags; dest += fieinfo->fi_extents_mapped; if (copy_to_user(dest, &extent, sizeof(extent))) return -EFAULT; fieinfo->fi_extents_mapped++; if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max) return 1; return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0; } EXPORT_SYMBOL(fiemap_fill_next_extent); /** * fiemap_prep - check validity of requested flags for fiemap * @inode: Inode to operate on * @fieinfo: Fiemap context passed into ->fiemap * @start: Start of the mapped range * @len: Length of the mapped range, can be truncated by this function. * @supported_flags: Set of fiemap flags that the file system understands * * This function must be called from each ->fiemap instance to validate the * fiemap request against the file system parameters. * * Returns 0 on success, or a negative error on failure. */ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 *len, u32 supported_flags) { u64 maxbytes = inode->i_sb->s_maxbytes; u32 incompat_flags; int ret = 0; if (*len == 0) return -EINVAL; if (start >= maxbytes) return -EFBIG; /* * Shrink request scope to what the fs can actually handle. 
*/ if (*len > maxbytes || (maxbytes - *len) < start) *len = maxbytes - start; supported_flags |= FIEMAP_FLAG_SYNC; supported_flags &= FIEMAP_FLAGS_COMPAT; incompat_flags = fieinfo->fi_flags & ~supported_flags; if (incompat_flags) { fieinfo->fi_flags = incompat_flags; return -EBADR; } if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) ret = filemap_write_and_wait(inode->i_mapping); return ret; } EXPORT_SYMBOL(fiemap_prep); static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap) { struct fiemap fiemap; struct fiemap_extent_info fieinfo = { 0, }; struct inode *inode = file_inode(filp); int error; if (!inode->i_op->fiemap) return -EOPNOTSUPP; if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap))) return -EFAULT; if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS) return -EINVAL; fieinfo.fi_flags = fiemap.fm_flags; fieinfo.fi_extents_max = fiemap.fm_extent_count; fieinfo.fi_extents_start = ufiemap->fm_extents; error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, fiemap.fm_length); fiemap.fm_flags = fieinfo.fi_flags; fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped; if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap))) error = -EFAULT; return error; } static int ioctl_file_clone(struct file *dst_file, unsigned long srcfd, u64 off, u64 olen, u64 destoff) { CLASS(fd, src_file)(srcfd); loff_t cloned; int ret; if (fd_empty(src_file)) return -EBADF; cloned = vfs_clone_file_range(fd_file(src_file), off, dst_file, destoff, olen, 0); if (cloned < 0) ret = cloned; else if (olen && cloned != olen) ret = -EINVAL; else ret = 0; return ret; } static int ioctl_file_clone_range(struct file *file, struct file_clone_range __user *argp) { struct file_clone_range args; if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; return ioctl_file_clone(file, args.src_fd, args.src_offset, args.src_length, args.dest_offset); } /* * This provides compatibility with legacy XFS pre-allocation ioctls * which predate the fallocate syscall. * * Only the l_start, l_len and l_whence fields of the 'struct space_resv' * are used here, rest are ignored. 
*/ static int ioctl_preallocate(struct file *filp, int mode, void __user *argp) { struct inode *inode = file_inode(filp); struct space_resv sr; if (copy_from_user(&sr, argp, sizeof(sr))) return -EFAULT; switch (sr.l_whence) { case SEEK_SET: break; case SEEK_CUR: sr.l_start += filp->f_pos; break; case SEEK_END: sr.l_start += i_size_read(inode); break; default: return -EINVAL; } return vfs_fallocate(filp, mode | FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len); } /* on ia32 l_start is on a 32-bit boundary */ #if defined CONFIG_COMPAT && defined(CONFIG_X86_64) /* just account for different alignment */ static int compat_ioctl_preallocate(struct file *file, int mode, struct space_resv_32 __user *argp) { struct inode *inode = file_inode(file); struct space_resv_32 sr; if (copy_from_user(&sr, argp, sizeof(sr))) return -EFAULT; switch (sr.l_whence) { case SEEK_SET: break; case SEEK_CUR: sr.l_start += file->f_pos; break; case SEEK_END: sr.l_start += i_size_read(inode); break; default: return -EINVAL; } return vfs_fallocate(file, mode | FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len); } #endif static int file_ioctl(struct file *filp, unsigned int cmd, int __user *p) { switch (cmd) { case FIBMAP: return ioctl_fibmap(filp, p); case FS_IOC_RESVSP: case FS_IOC_RESVSP64: return ioctl_preallocate(filp, 0, p); case FS_IOC_UNRESVSP: case FS_IOC_UNRESVSP64: return ioctl_preallocate(filp, FALLOC_FL_PUNCH_HOLE, p); case FS_IOC_ZERO_RANGE: return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p); } return -ENOIOCTLCMD; } static int ioctl_fionbio(struct file *filp, int __user *argp) { unsigned int flag; int on, error; error = get_user(on, argp); if (error) return error; flag = O_NONBLOCK; #ifdef __sparc__ /* SunOS compatibility item. */ if (O_NONBLOCK != O_NDELAY) flag |= O_NDELAY; #endif spin_lock(&filp->f_lock); if (on) filp->f_flags |= flag; else filp->f_flags &= ~flag; spin_unlock(&filp->f_lock); return error; } static int ioctl_fioasync(unsigned int fd, struct file *filp, int __user *argp) { unsigned int flag; int on, error; error = get_user(on, argp); if (error) return error; flag = on ? FASYNC : 0; /* Did FASYNC state change ? */ if ((flag ^ filp->f_flags) & FASYNC) { if (filp->f_op->fasync) /* fasync() adjusts filp->f_flags */ error = filp->f_op->fasync(fd, filp, on); else error = -ENOTTY; } return error < 0 ? error : 0; } static int ioctl_fsfreeze(struct file *filp) { struct super_block *sb = file_inode(filp)->i_sb; if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) return -EPERM; /* If filesystem doesn't support freeze feature, return. 
*/ if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL) return -EOPNOTSUPP; /* Freeze */ if (sb->s_op->freeze_super) return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); return freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); } static int ioctl_fsthaw(struct file *filp) { struct super_block *sb = file_inode(filp)->i_sb; if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) return -EPERM; /* Thaw */ if (sb->s_op->thaw_super) return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL); return thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL); } static int ioctl_file_dedupe_range(struct file *file, struct file_dedupe_range __user *argp) { struct file_dedupe_range *same = NULL; int ret; unsigned long size; u16 count; if (get_user(count, &argp->dest_count)) { ret = -EFAULT; goto out; } size = offsetof(struct file_dedupe_range, info[count]); if (size > PAGE_SIZE) { ret = -ENOMEM; goto out; } same = memdup_user(argp, size); if (IS_ERR(same)) { ret = PTR_ERR(same); same = NULL; goto out; } same->dest_count = count; ret = vfs_dedupe_file_range(file, same); if (ret) goto out; ret = copy_to_user(argp, same, size); if (ret) ret = -EFAULT; out: kfree(same); return ret; } /** * fileattr_fill_xflags - initialize fileattr with xflags * @fa: fileattr pointer * @xflags: FS_XFLAG_* flags * * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All * other fields are zeroed. */ void fileattr_fill_xflags(struct fileattr *fa, u32 xflags) { memset(fa, 0, sizeof(*fa)); fa->fsx_valid = true; fa->fsx_xflags = xflags; if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE) fa->flags |= FS_IMMUTABLE_FL; if (fa->fsx_xflags & FS_XFLAG_APPEND) fa->flags |= FS_APPEND_FL; if (fa->fsx_xflags & FS_XFLAG_SYNC) fa->flags |= FS_SYNC_FL; if (fa->fsx_xflags & FS_XFLAG_NOATIME) fa->flags |= FS_NOATIME_FL; if (fa->fsx_xflags & FS_XFLAG_NODUMP) fa->flags |= FS_NODUMP_FL; if (fa->fsx_xflags & FS_XFLAG_DAX) fa->flags |= FS_DAX_FL; if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT) fa->flags |= FS_PROJINHERIT_FL; } EXPORT_SYMBOL(fileattr_fill_xflags); /** * fileattr_fill_flags - initialize fileattr with flags * @fa: fileattr pointer * @flags: FS_*_FL flags * * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags). * All other fields are zeroed. */ void fileattr_fill_flags(struct fileattr *fa, u32 flags) { memset(fa, 0, sizeof(*fa)); fa->flags_valid = true; fa->flags = flags; if (fa->flags & FS_SYNC_FL) fa->fsx_xflags |= FS_XFLAG_SYNC; if (fa->flags & FS_IMMUTABLE_FL) fa->fsx_xflags |= FS_XFLAG_IMMUTABLE; if (fa->flags & FS_APPEND_FL) fa->fsx_xflags |= FS_XFLAG_APPEND; if (fa->flags & FS_NODUMP_FL) fa->fsx_xflags |= FS_XFLAG_NODUMP; if (fa->flags & FS_NOATIME_FL) fa->fsx_xflags |= FS_XFLAG_NOATIME; if (fa->flags & FS_DAX_FL) fa->fsx_xflags |= FS_XFLAG_DAX; if (fa->flags & FS_PROJINHERIT_FL) fa->fsx_xflags |= FS_XFLAG_PROJINHERIT; } EXPORT_SYMBOL(fileattr_fill_flags); /** * vfs_fileattr_get - retrieve miscellaneous file attributes * @dentry: the object to retrieve from * @fa: fileattr pointer * * Call i_op->fileattr_get() callback, if exists. * * Return: 0 on success, or a negative error on failure. */ int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); if (!inode->i_op->fileattr_get) return -ENOIOCTLCMD; return inode->i_op->fileattr_get(dentry, fa); } EXPORT_SYMBOL(vfs_fileattr_get); /** * copy_fsxattr_to_user - copy fsxattr to userspace. * @fa: fileattr pointer * @ufa: fsxattr user pointer * * Return: 0 on success, or -EFAULT on failure. 
*/ int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa) { struct fsxattr xfa; memset(&xfa, 0, sizeof(xfa)); xfa.fsx_xflags = fa->fsx_xflags; xfa.fsx_extsize = fa->fsx_extsize; xfa.fsx_nextents = fa->fsx_nextents; xfa.fsx_projid = fa->fsx_projid; xfa.fsx_cowextsize = fa->fsx_cowextsize; if (copy_to_user(ufa, &xfa, sizeof(xfa))) return -EFAULT; return 0; } EXPORT_SYMBOL(copy_fsxattr_to_user); static int copy_fsxattr_from_user(struct fileattr *fa, struct fsxattr __user *ufa) { struct fsxattr xfa; if (copy_from_user(&xfa, ufa, sizeof(xfa))) return -EFAULT; fileattr_fill_xflags(fa, xfa.fsx_xflags); fa->fsx_extsize = xfa.fsx_extsize; fa->fsx_nextents = xfa.fsx_nextents; fa->fsx_projid = xfa.fsx_projid; fa->fsx_cowextsize = xfa.fsx_cowextsize; return 0; } /* * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject * any invalid configurations. * * Note: must be called with inode lock held. */ static int fileattr_set_prepare(struct inode *inode, const struct fileattr *old_ma, struct fileattr *fa) { int err; /* * The IMMUTABLE and APPEND_ONLY flags can only be changed by * the relevant capability. */ if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) && !capable(CAP_LINUX_IMMUTABLE)) return -EPERM; err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags); if (err) return err; /* * Project Quota ID state is only allowed to change from within the init * namespace. Enforce that restriction only if we are trying to change * the quota ID state. Everything else is allowed in user namespaces. */ if (current_user_ns() != &init_user_ns) { if (old_ma->fsx_projid != fa->fsx_projid) return -EINVAL; if ((old_ma->fsx_xflags ^ fa->fsx_xflags) & FS_XFLAG_PROJINHERIT) return -EINVAL; } else { /* * Caller is allowed to change the project ID. If it is being * changed, make sure that the new value is valid. */ if (old_ma->fsx_projid != fa->fsx_projid && !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid))) return -EINVAL; } /* Check extent size hints. */ if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode)) return -EINVAL; if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && !S_ISDIR(inode->i_mode)) return -EINVAL; if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) && !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) return -EINVAL; /* * It is only valid to set the DAX flag on regular files and * directories on filesystems. */ if ((fa->fsx_xflags & FS_XFLAG_DAX) && !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) return -EINVAL; /* Extent size hints of zero turn off the flags. */ if (fa->fsx_extsize == 0) fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); if (fa->fsx_cowextsize == 0) fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; return 0; } /** * vfs_fileattr_set - change miscellaneous file attributes * @idmap: idmap of the mount * @dentry: the object to change * @fa: fileattr pointer * * After verifying permissions, call i_op->fileattr_set() callback, if * exists. * * Verifying attributes involves retrieving current attributes with * i_op->fileattr_get(), this also allows initializing attributes that have * not been set by the caller to current values. Inode lock is held * thoughout to prevent racing with another instance. * * Return: 0 on success, or a negative error on failure. 
*/ int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct fileattr old_ma = {}; int err; if (!inode->i_op->fileattr_set) return -ENOIOCTLCMD; if (!inode_owner_or_capable(idmap, inode)) return -EPERM; inode_lock(inode); err = vfs_fileattr_get(dentry, &old_ma); if (!err) { /* initialize missing bits from old_ma */ if (fa->flags_valid) { fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON; fa->fsx_extsize = old_ma.fsx_extsize; fa->fsx_nextents = old_ma.fsx_nextents; fa->fsx_projid = old_ma.fsx_projid; fa->fsx_cowextsize = old_ma.fsx_cowextsize; } else { fa->flags |= old_ma.flags & ~FS_COMMON_FL; } err = fileattr_set_prepare(inode, &old_ma, fa); if (!err) err = inode->i_op->fileattr_set(idmap, dentry, fa); } inode_unlock(inode); return err; } EXPORT_SYMBOL(vfs_fileattr_set); static int ioctl_getflags(struct file *file, unsigned int __user *argp) { struct fileattr fa = { .flags_valid = true }; /* hint only */ int err; err = vfs_fileattr_get(file->f_path.dentry, &fa); if (!err) err = put_user(fa.flags, argp); return err; } static int ioctl_setflags(struct file *file, unsigned int __user *argp) { struct mnt_idmap *idmap = file_mnt_idmap(file); struct dentry *dentry = file->f_path.dentry; struct fileattr fa; unsigned int flags; int err; err = get_user(flags, argp); if (!err) { err = mnt_want_write_file(file); if (!err) { fileattr_fill_flags(&fa, flags); err = vfs_fileattr_set(idmap, dentry, &fa); mnt_drop_write_file(file); } } return err; } static int ioctl_fsgetxattr(struct file *file, void __user *argp) { struct fileattr fa = { .fsx_valid = true }; /* hint only */ int err; err = vfs_fileattr_get(file->f_path.dentry, &fa); if (!err) err = copy_fsxattr_to_user(&fa, argp); return err; } static int ioctl_fssetxattr(struct file *file, void __user *argp) { struct mnt_idmap *idmap = file_mnt_idmap(file); struct dentry *dentry = file->f_path.dentry; struct fileattr fa; int err; err = copy_fsxattr_from_user(&fa, argp); if (!err) { err = mnt_want_write_file(file); if (!err) { err = vfs_fileattr_set(idmap, dentry, &fa); mnt_drop_write_file(file); } } return err; } static int ioctl_getfsuuid(struct file *file, void __user *argp) { struct super_block *sb = file_inode(file)->i_sb; struct fsuuid2 u = { .len = sb->s_uuid_len, }; if (!sb->s_uuid_len) return -ENOTTY; memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len); return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; } static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp) { struct super_block *sb = file_inode(file)->i_sb; if (!strlen(sb->s_sysfs_name)) return -ENOTTY; struct fs_sysfs_path u = {}; u.len = scnprintf(u.name, sizeof(u.name), "%s/%s", sb->s_type->name, sb->s_sysfs_name); return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; } /* * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. * It's just a simple helper for sys_ioctl and compat_sys_ioctl. * * When you add any new common ioctls to the switches above and below, * please ensure they have compatible arguments in compat mode. * * The LSM mailing list should also be notified of any command additions or * changes, as specific LSMs may be affected. 
*/ static int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct inode *inode = file_inode(filp); switch (cmd) { case FIOCLEX: set_close_on_exec(fd, 1); return 0; case FIONCLEX: set_close_on_exec(fd, 0); return 0; case FIONBIO: return ioctl_fionbio(filp, argp); case FIOASYNC: return ioctl_fioasync(fd, filp, argp); case FIOQSIZE: if (S_ISDIR(inode->i_mode) || (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode)) || S_ISLNK(inode->i_mode)) { loff_t res = inode_get_bytes(inode); return copy_to_user(argp, &res, sizeof(res)) ? -EFAULT : 0; } return -ENOTTY; case FIFREEZE: return ioctl_fsfreeze(filp); case FITHAW: return ioctl_fsthaw(filp); case FS_IOC_FIEMAP: return ioctl_fiemap(filp, argp); case FIGETBSZ: /* anon_bdev filesystems may not have a block size */ if (!inode->i_sb->s_blocksize) return -EINVAL; return put_user(inode->i_sb->s_blocksize, (int __user *)argp); case FICLONE: return ioctl_file_clone(filp, arg, 0, 0, 0); case FICLONERANGE: return ioctl_file_clone_range(filp, argp); case FIDEDUPERANGE: return ioctl_file_dedupe_range(filp, argp); case FIONREAD: if (!S_ISREG(inode->i_mode) || IS_ANON_FILE(inode)) return vfs_ioctl(filp, cmd, arg); return put_user(i_size_read(inode) - filp->f_pos, (int __user *)argp); case FS_IOC_GETFLAGS: return ioctl_getflags(filp, argp); case FS_IOC_SETFLAGS: return ioctl_setflags(filp, argp); case FS_IOC_FSGETXATTR: return ioctl_fsgetxattr(filp, argp); case FS_IOC_FSSETXATTR: return ioctl_fssetxattr(filp, argp); case FS_IOC_GETFSUUID: return ioctl_getfsuuid(filp, argp); case FS_IOC_GETFSSYSFSPATH: return ioctl_get_fs_sysfs_path(filp, argp); default: if (S_ISREG(inode->i_mode) && !IS_ANON_FILE(inode)) return file_ioctl(filp, cmd, argp); break; } return -ENOIOCTLCMD; } SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { CLASS(fd, f)(fd); int error; if (fd_empty(f)) return -EBADF; error = security_file_ioctl(fd_file(f), cmd, arg); if (error) return error; error = do_vfs_ioctl(fd_file(f), fd, cmd, arg); if (error == -ENOIOCTLCMD) error = vfs_ioctl(fd_file(f), cmd, arg); return error; } #ifdef CONFIG_COMPAT /** * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation * @file: The file to operate on. * @cmd: The ioctl command number. * @arg: The argument to the ioctl. * * This is not normally called as a function, but instead set in struct * file_operations as * * .compat_ioctl = compat_ptr_ioctl, * * On most architectures, the compat_ptr_ioctl() just passes all arguments * to the corresponding ->ioctl handler. The exception is arch/s390, where * compat_ptr() clears the top bit of a 32-bit pointer value, so user space * pointers to the second 2GB alias the first 2GB, as is the case for * native 32-bit s390 user space. * * The compat_ptr_ioctl() function must therefore be used only with ioctl * functions that either ignore the argument or pass a pointer to a * compatible data type. * * If any ioctl command handled by fops->unlocked_ioctl passes a plain * integer instead of a pointer, or any of the passed data types * is incompatible between 32-bit and 64-bit architectures, a proper * handler is required instead of compat_ptr_ioctl. 
*/ long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { if (!file->f_op->unlocked_ioctl) return -ENOIOCTLCMD; return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); } EXPORT_SYMBOL(compat_ptr_ioctl); COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { CLASS(fd, f)(fd); int error; if (fd_empty(f)) return -EBADF; error = security_file_ioctl_compat(fd_file(f), cmd, arg); if (error) return error; switch (cmd) { /* FICLONE takes an int argument, so don't use compat_ptr() */ case FICLONE: error = ioctl_file_clone(fd_file(f), arg, 0, 0, 0); break; #if defined(CONFIG_X86_64) /* these get messy on amd64 due to alignment differences */ case FS_IOC_RESVSP_32: case FS_IOC_RESVSP64_32: error = compat_ioctl_preallocate(fd_file(f), 0, compat_ptr(arg)); break; case FS_IOC_UNRESVSP_32: case FS_IOC_UNRESVSP64_32: error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_PUNCH_HOLE, compat_ptr(arg)); break; case FS_IOC_ZERO_RANGE_32: error = compat_ioctl_preallocate(fd_file(f), FALLOC_FL_ZERO_RANGE, compat_ptr(arg)); break; #endif /* * These access 32-bit values anyway so no further handling is * necessary. */ case FS_IOC32_GETFLAGS: case FS_IOC32_SETFLAGS: cmd = (cmd == FS_IOC32_GETFLAGS) ? FS_IOC_GETFLAGS : FS_IOC_SETFLAGS; fallthrough; /* * everything else in do_vfs_ioctl() takes either a compatible * pointer argument or no argument -- call it with a modified * argument. */ default: error = do_vfs_ioctl(fd_file(f), fd, cmd, (unsigned long)compat_ptr(arg)); if (error != -ENOIOCTLCMD) break; if (fd_file(f)->f_op->compat_ioctl) error = fd_file(f)->f_op->compat_ioctl(fd_file(f), cmd, arg); if (error == -ENOIOCTLCMD) error = -ENOTTY; break; } return error; } #endif |
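/*
 * Illustrative only: a minimal, hypothetical ->fiemap implementation showing
 * the contract documented above -- fiemap_prep() validates and clamps the
 * request, fiemap_fill_next_extent() reports each mapping back to user space.
 * foo_fiemap() and the single-contiguous-extent layout are assumptions made
 * purely for this sketch; they are not part of fs/ioctl.c.
 */
static int foo_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		      u64 start, u64 len)
{
	u64 disk_off = 0;	/* hypothetical on-disk offset of the file data */
	int ret;

	/* Reject unsupported flags, clamp the range, sync pages if requested. */
	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/* One extent covers the whole request; a return of 1 only means the
	 * user-supplied extent array is full, which is not an error here. */
	ret = fiemap_fill_next_extent(fieinfo, start, disk_off + start, len,
				      FIEMAP_EXTENT_LAST);
	return ret < 0 ? ret : 0;
}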
// SPDX-License-Identifier: GPL-2.0 /* * OF NUMA Parsing support. * * Copyright (C) 2015 - 2016 Cavium Inc. */ #define pr_fmt(fmt) "OF: NUMA: " fmt #include <linux/of.h> #include <linux/of_address.h> #include <linux/nodemask.h> #include <linux/numa_memblks.h> #include <asm/numa.h> /* * Even though we connect cpus to numa domains later in SMP * init, we need to know the node ids now for all cpus. */ static void __init of_numa_parse_cpu_nodes(void) { u32 nid; int r; struct device_node *np; for_each_of_cpu_node(np) { r = of_property_read_u32(np, "numa-node-id", &nid); if (r) continue; pr_debug("CPU on %u\n", nid); if (nid >= MAX_NUMNODES) pr_warn("Node id %u exceeds maximum value\n", nid); else node_set(nid, numa_nodes_parsed); } } static int __init of_numa_parse_memory_nodes(void) { struct device_node *np = NULL; struct resource rsrc; u32 nid; int i, r = -EINVAL; for_each_node_by_type(np, "memory") { r = of_property_read_u32(np, "numa-node-id", &nid); if (r == -EINVAL) /* * property doesn't exist if -EINVAL, continue * looking for more memory nodes with * "numa-node-id" property */ continue; if (nid >= MAX_NUMNODES) { pr_warn("Node id %u exceeds maximum value\n", nid); r = -EINVAL; } for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++) r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1); if (!i || r) { of_node_put(np); pr_err("bad property in memory node\n"); return r ?
: -EINVAL; } } return r; } static int __init of_numa_parse_distance_map_v1(struct device_node *map) { const __be32 *matrix; int entry_count; int i; pr_info("parsing numa-distance-map-v1\n"); matrix = of_get_property(map, "distance-matrix", NULL); if (!matrix) { pr_err("No distance-matrix property in distance-map\n"); return -EINVAL; } entry_count = of_property_count_u32_elems(map, "distance-matrix"); if (entry_count <= 0) { pr_err("Invalid distance-matrix\n"); return -EINVAL; } for (i = 0; i + 2 < entry_count; i += 3) { u32 nodea, nodeb, distance; nodea = of_read_number(matrix, 1); matrix++; nodeb = of_read_number(matrix, 1); matrix++; distance = of_read_number(matrix, 1); matrix++; if ((nodea == nodeb && distance != LOCAL_DISTANCE) || (nodea != nodeb && distance <= LOCAL_DISTANCE)) { pr_err("Invalid distance[node%d -> node%d] = %d\n", nodea, nodeb, distance); return -EINVAL; } node_set(nodea, numa_nodes_parsed); numa_set_distance(nodea, nodeb, distance); /* Set default distance of node B->A same as A->B */ if (nodeb > nodea) numa_set_distance(nodeb, nodea, distance); } return 0; } static int __init of_numa_parse_distance_map(void) { int ret = 0; struct device_node *np; np = of_find_compatible_node(NULL, NULL, "numa-distance-map-v1"); if (np) ret = of_numa_parse_distance_map_v1(np); of_node_put(np); return ret; } int of_node_to_nid(struct device_node *device) { struct device_node *np; u32 nid; int r = -ENODATA; np = of_node_get(device); while (np) { r = of_property_read_u32(np, "numa-node-id", &nid); /* * -EINVAL indicates the property was not found, and * we walk up the tree trying to find a parent with a * "numa-node-id". Any other type of error indicates * a bad device tree and we give up. */ if (r != -EINVAL) break; np = of_get_next_parent(np); } if (np && r) pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n", np); of_node_put(np); /* * If numa=off passed on command line, or with a defective * device tree, the nid may not be in the set of possible * nodes. Check for this case and return NUMA_NO_NODE. */ if (!r && nid < MAX_NUMNODES && node_possible(nid)) return nid; return NUMA_NO_NODE; } int __init of_numa_init(void) { int r; of_numa_parse_cpu_nodes(); r = of_numa_parse_memory_nodes(); if (r) return r; return of_numa_parse_distance_map(); } |
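/*
 * Illustrative only: a hypothetical consumer of of_node_to_nid() above.  The
 * foo_probe() routine and struct foo_priv are assumptions for the sketch
 * (a real driver would also include <linux/platform_device.h> and
 * <linux/slab.h>).  The point shown is that NUMA_NO_NODE is an acceptable
 * node argument for the node-aware allocators, so the fallback value needs
 * no special casing.
 */
struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	int nid = of_node_to_nid(pdev->dev.of_node);
	struct foo_priv *priv;

	/* Place the driver's private data on the device's own memory node. */
	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, nid);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}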
/* SPDX-License-Identifier: GPL-2.0 */ /* linux/include/linux/clockchips.h * * This file contains the structure definitions for clockchips. * * If you are not a clockchip, or the time of day code, you should * not be including this file! */ #ifndef _LINUX_CLOCKCHIPS_H #define _LINUX_CLOCKCHIPS_H #ifdef CONFIG_GENERIC_CLOCKEVENTS # include <linux/clocksource.h> # include <linux/cpumask_types.h> # include <linux/ktime.h> # include <linux/notifier.h> struct clock_event_device; struct module; /* * Possible states of a clock event device. * * DETACHED: Device is not used by clockevents core. Initial state or can be * reached from SHUTDOWN. * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT. * PERIODIC: Device is programmed to generate events periodically. Can be * reached from DETACHED or SHUTDOWN. * ONESHOT: Device is programmed to generate event only once. Can be reached * from DETACHED or SHUTDOWN. * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily * stopped. */ enum clock_event_state { CLOCK_EVT_STATE_DETACHED, CLOCK_EVT_STATE_SHUTDOWN, CLOCK_EVT_STATE_PERIODIC, CLOCK_EVT_STATE_ONESHOT, CLOCK_EVT_STATE_ONESHOT_STOPPED, }; /* * Clock event features */ # define CLOCK_EVT_FEAT_PERIODIC 0x000001 # define CLOCK_EVT_FEAT_ONESHOT 0x000002 # define CLOCK_EVT_FEAT_KTIME 0x000004 /* * x86(64) specific (mis)features: * * - Clockevent source stops in C3 State and needs broadcast support. * - Local APIC timer is used as a dummy device.
*/ # define CLOCK_EVT_FEAT_C3STOP 0x000008 # define CLOCK_EVT_FEAT_DUMMY 0x000010 /* * Core shall set the interrupt affinity dynamically in broadcast mode */ # define CLOCK_EVT_FEAT_DYNIRQ 0x000020 # define CLOCK_EVT_FEAT_PERCPU 0x000040 /* * Clockevent device is based on a hrtimer for broadcast */ # define CLOCK_EVT_FEAT_HRTIMER 0x000080 /** * struct clock_event_device - clock event device descriptor * @event_handler: Assigned by the framework to be called by the low * level handler of the event source * @set_next_event: set next event function using a clocksource delta * @set_next_ktime: set next event function using a direct ktime value * @next_event: local storage for the next event in oneshot mode * @max_delta_ns: maximum delta value in ns * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) * @state_use_accessors:current state of the device, assigned by the core code * @features: features * @retries: number of forced programming retries * @set_state_periodic: switch state to periodic * @set_state_oneshot: switch state to oneshot * @set_state_oneshot_stopped: switch state to oneshot_stopped * @set_state_shutdown: switch state to shutdown * @tick_resume: resume clkevt device * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration * @name: ptr to clock event name * @rating: variable to rate clock event devices * @irq: IRQ number (only for non CPU local devices) * @bound_on: Bound on CPU * @cpumask: cpumask to indicate for which CPUs this device works * @list: list head for the management code * @owner: module reference */ struct clock_event_device { void (*event_handler)(struct clock_event_device *); int (*set_next_event)(unsigned long evt, struct clock_event_device *); int (*set_next_ktime)(ktime_t expires, struct clock_event_device *); ktime_t next_event; u64 max_delta_ns; u64 min_delta_ns; u32 mult; u32 shift; enum clock_event_state state_use_accessors; unsigned int features; unsigned long retries; int (*set_state_periodic)(struct clock_event_device *); int (*set_state_oneshot)(struct clock_event_device *); int (*set_state_oneshot_stopped)(struct clock_event_device *); int (*set_state_shutdown)(struct clock_event_device *); int (*tick_resume)(struct clock_event_device *); void (*broadcast)(const struct cpumask *mask); void (*suspend)(struct clock_event_device *); void (*resume)(struct clock_event_device *); unsigned long min_delta_ticks; unsigned long max_delta_ticks; const char *name; int rating; int irq; int bound_on; const struct cpumask *cpumask; struct list_head list; struct module *owner; } ____cacheline_aligned; /* Helpers to verify state of a clockevent device */ static inline bool clockevent_state_detached(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED; } static inline bool clockevent_state_shutdown(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN; } static inline bool clockevent_state_periodic(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC; } static inline bool clockevent_state_oneshot(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT; } static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev) { return dev->state_use_accessors == 
CLOCK_EVT_STATE_ONESHOT_STOPPED; } /* * Calculate a multiplication factor for scaled math, which is used to convert * nanoseconds based values to clock ticks: * * clock_ticks = (nanoseconds * factor) >> shift. * * div_sc is the rearranged equation to calculate a factor from a given clock * ticks / nanoseconds ratio: * * factor = (clock_ticks << shift) / nanoseconds */ static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, int shift) { u64 tmp = ((u64)ticks) << shift; do_div(tmp, nsec); return (unsigned long) tmp; } /* Clock event layer functions */ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); extern void clockevents_register_device(struct clock_event_device *dev); extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); extern void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta); extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); static inline void clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec) { return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec); } extern void clockevents_suspend(void); extern void clockevents_resume(void); # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST # ifdef CONFIG_ARCH_HAS_TICK_BROADCAST extern void tick_broadcast(const struct cpumask *mask); # else # define tick_broadcast NULL # endif extern int tick_receive_broadcast(void); # endif # if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) extern void tick_setup_hrtimer_broadcast(void); extern int tick_check_broadcast_expired(void); # else static __always_inline int tick_check_broadcast_expired(void) { return 0; } static inline void tick_setup_hrtimer_broadcast(void) { } # endif #else /* !CONFIG_GENERIC_CLOCKEVENTS: */ static inline void clockevents_suspend(void) { } static inline void clockevents_resume(void) { } static __always_inline int tick_check_broadcast_expired(void) { return 0; } static inline void tick_setup_hrtimer_broadcast(void) { } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ #endif /* _LINUX_CLOCKCHIPS_H */ |
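/*
 * Illustrative only: a minimal, hypothetical one-shot tick driver registering
 * a clock_event_device as described above.  The foo_* names, the 10 MHz rate
 * and the 2..0xffffffff tick limits are assumptions for this sketch;
 * clockevents_config_and_register() derives mult/shift and min/max_delta_ns
 * from the frequency and tick limits passed here.
 */
static int foo_timer_set_next_event(unsigned long ticks,
				    struct clock_event_device *ced)
{
	/* Program the (assumed) hardware comparator to fire in 'ticks' cycles. */
	return 0;
}

static int foo_timer_shutdown(struct clock_event_device *ced)
{
	/* Stop the (assumed) hardware counter. */
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo-timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= foo_timer_set_next_event,
	.set_state_shutdown	= foo_timer_shutdown,
	.set_state_oneshot_stopped = foo_timer_shutdown,
};

static void foo_timer_init(void)
{
	foo_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&foo_clockevent, 10000000, 2, 0xffffffff);
}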
// SPDX-License-Identifier: GPL-2.0-only /* * Generic helpers for smp ipi calls * * (C) Jens Axboe <jens.axboe@oracle.com> 2008 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/irq_work.h> #include <linux/rcupdate.h> #include <linux/rculist.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/gfp.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/sched/idle.h> #include <linux/hypervisor.h> #include <linux/sched/clock.h> #include <linux/nmi.h> #include <linux/sched/debug.h> #include <linux/jump_label.h> #include <linux/string_choices.h> #include <trace/events/ipi.h> #define CREATE_TRACE_POINTS #include <trace/events/csd.h> #undef CREATE_TRACE_POINTS #include "smpboot.h" #include "sched/smp.h" #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK) struct call_function_data { call_single_data_t __percpu *csd; cpumask_var_t cpumask; cpumask_var_t cpumask_ipi; }; static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data); static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1); static void __flush_smp_call_function_queue(bool warn_cpu_offline); int smpcfd_prepare_cpu(unsigned int cpu) { struct call_function_data *cfd = &per_cpu(cfd_data, cpu); if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return -ENOMEM; if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, cpu_to_node(cpu))) { free_cpumask_var(cfd->cpumask); return -ENOMEM; } cfd->csd = alloc_percpu(call_single_data_t); if (!cfd->csd) { free_cpumask_var(cfd->cpumask); free_cpumask_var(cfd->cpumask_ipi); return -ENOMEM; } return 0; } int smpcfd_dead_cpu(unsigned int cpu) { struct call_function_data *cfd = &per_cpu(cfd_data, cpu); free_cpumask_var(cfd->cpumask); free_cpumask_var(cfd->cpumask_ipi); free_percpu(cfd->csd); return 0; } int smpcfd_dying_cpu(unsigned int cpu) { /* * The IPIs for the smp-call-function callbacks queued by other * CPUs might arrive late, either due to hardware latencies or * because this CPU disabled interrupts (inside stop-machine) * before the IPIs
were sent. So flush out any pending callbacks * explicitly (without waiting for the IPIs to arrive), to * ensure that the outgoing CPU doesn't go offline with work * still pending. */ __flush_smp_call_function_queue(false); irq_work_run(); return 0; } void __init call_function_init(void) { int i; for_each_possible_cpu(i) init_llist_head(&per_cpu(call_single_queue, i)); smpcfd_prepare_cpu(smp_processor_id()); } static __always_inline void send_call_function_single_ipi(int cpu) { if (call_function_single_prep_ipi(cpu)) { trace_ipi_send_cpu(cpu, _RET_IP_, generic_smp_call_function_single_interrupt); arch_send_call_function_single_ipi(cpu); } } static __always_inline void send_call_function_ipi_mask(struct cpumask *mask) { trace_ipi_send_cpumask(mask, _RET_IP_, generic_smp_call_function_single_interrupt); arch_send_call_function_ipi_mask(mask); } static __always_inline void csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd) { trace_csd_function_entry(func, csd); func(info); trace_csd_function_exit(func, csd); } #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled); /* * Parse the csdlock_debug= kernel boot parameter. * * If you need to restore the old "ext" value that once provided * additional debugging information, reapply the following commits: * * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging") * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging") */ static int __init csdlock_debug(char *str) { int ret; unsigned int val = 0; ret = get_option(&str, &val); if (ret) { if (val) static_branch_enable(&csdlock_debug_enabled); else static_branch_disable(&csdlock_debug_enabled); } return 1; } __setup("csdlock_debug=", csdlock_debug); static DEFINE_PER_CPU(call_single_data_t *, cur_csd); static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func); static DEFINE_PER_CPU(void *, cur_csd_info); static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */ module_param(csd_lock_timeout, ulong, 0644); static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */ module_param(panic_on_ipistall, int, 0644); static atomic_t csd_bug_count = ATOMIC_INIT(0); /* Record current CSD work for current CPU, NULL to erase. */ static void __csd_lock_record(call_single_data_t *csd) { if (!csd) { smp_mb(); /* NULL cur_csd after unlock. */ __this_cpu_write(cur_csd, NULL); return; } __this_cpu_write(cur_csd_func, csd->func); __this_cpu_write(cur_csd_info, csd->info); smp_wmb(); /* func and info before csd. */ __this_cpu_write(cur_csd, csd); smp_mb(); /* Update cur_csd before function call. */ /* Or before unlock, as the case may be. */ } static __always_inline void csd_lock_record(call_single_data_t *csd) { if (static_branch_unlikely(&csdlock_debug_enabled)) __csd_lock_record(csd); } static int csd_lock_wait_getcpu(call_single_data_t *csd) { unsigned int csd_type; csd_type = CSD_TYPE(csd); if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC) return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ return -1; } static atomic_t n_csd_lock_stuck; /** * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long? * * Returns @true if a CSD-lock acquisition is stuck and has been stuck * long enough for a "non-responsive CSD lock" message to be printed. */ bool csd_lock_is_stuck(void) { return !!atomic_read(&n_csd_lock_stuck); } /* * Complain if too much time spent waiting. 
Note that only * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU, * so waiting on other types gets much less information. */ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages) { int cpu = -1; int cpux; bool firsttime; u64 ts2, ts_delta; call_single_data_t *cpu_cur_csd; unsigned int flags = READ_ONCE(csd->node.u_flags); unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC; if (!(flags & CSD_FLAG_LOCK)) { if (!unlikely(*bug_id)) return true; cpu = csd_lock_wait_getcpu(csd); pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n", *bug_id, raw_smp_processor_id(), cpu); atomic_dec(&n_csd_lock_stuck); return true; } ts2 = ktime_get_mono_fast_ns(); /* How long since we last checked for a stuck CSD lock.*/ ts_delta = ts2 - *ts1; if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) * (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) || csd_lock_timeout_ns == 0)) return false; if (ts0 > ts2) { /* Our own sched_clock went backward; don't blame another CPU. */ ts_delta = ts0 - ts2; pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta); *ts1 = ts2; return false; } firsttime = !*bug_id; if (firsttime) *bug_id = atomic_inc_return(&csd_bug_count); cpu = csd_lock_wait_getcpu(csd); if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu)) cpux = 0; else cpux = cpu; cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */ /* How long since this CSD lock was stuck. */ ts_delta = ts2 - ts0; pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n", firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta, cpu, csd->func, csd->info); (*nmessages)++; if (firsttime) atomic_inc(&n_csd_lock_stuck); /* * If the CSD lock is still stuck after 5 minutes, it is unlikely * to become unstuck. Use a signed comparison to avoid triggering * on underflows when the TSC is out of sync between sockets. */ BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC)); if (cpu_cur_csd && csd != cpu_cur_csd) { pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n", *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)), READ_ONCE(per_cpu(cur_csd_info, cpux))); } else { pr_alert("\tcsd: CSD lock (#%d) %s.\n", *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request"); } if (cpu >= 0) { if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0)) dump_cpu_task(cpu); if (!cpu_cur_csd) { pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu); arch_send_call_function_single_ipi(cpu); } } if (firsttime) dump_stack(); *ts1 = ts2; return false; } /* * csd_lock/csd_unlock used to serialize access to per-cpu csd resources * * For non-synchronous ipi calls the csd can still be in use by the * previous function call. For multi-cpu calls its even more interesting * as we'll have to ensure no other cpu is observing our csd. 
*/ static void __csd_lock_wait(call_single_data_t *csd) { unsigned long nmessages = 0; int bug_id = 0; u64 ts0, ts1; ts1 = ts0 = ktime_get_mono_fast_ns(); for (;;) { if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages)) break; cpu_relax(); } smp_acquire__after_ctrl_dep(); } static __always_inline void csd_lock_wait(call_single_data_t *csd) { if (static_branch_unlikely(&csdlock_debug_enabled)) { __csd_lock_wait(csd); return; } smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); } #else static void csd_lock_record(call_single_data_t *csd) { } static __always_inline void csd_lock_wait(call_single_data_t *csd) { smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); } #endif static __always_inline void csd_lock(call_single_data_t *csd) { csd_lock_wait(csd); csd->node.u_flags |= CSD_FLAG_LOCK; /* * prevent CPU from reordering the above assignment * to ->flags with any subsequent assignments to other * fields of the specified call_single_data_t structure: */ smp_wmb(); } static __always_inline void csd_unlock(call_single_data_t *csd) { WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); /* * ensure we're all done before releasing data: */ smp_store_release(&csd->node.u_flags, 0); } static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data); void __smp_call_single_queue(int cpu, struct llist_node *node) { /* * We have to check the type of the CSD before queueing it, because * once queued it can have its flags cleared by * flush_smp_call_function_queue() * even if we haven't sent the smp_call IPI yet (e.g. the stopper * executes migration_cpu_stop() on the remote CPU). */ if (trace_csd_queue_cpu_enabled()) { call_single_data_t *csd; smp_call_func_t func; csd = container_of(node, call_single_data_t, node.llist); func = CSD_TYPE(csd) == CSD_TYPE_TTWU ? sched_ttwu_pending : csd->func; trace_csd_queue_cpu(cpu, _RET_IP_, func, csd); } /* * The list addition should be visible to the target CPU when it pops * the head of the list to pull the entry off it in the IPI handler * because of normal cache coherency rules implied by the underlying * llist ops. * * If IPIs can go out of order to the cache coherency protocol * in an architecture, sufficient synchronisation should be added * to arch code to make it appear to obey cache coherency WRT * locking and barrier primitives. Generic code isn't really * equipped to do the right thing... */ if (llist_add(node, &per_cpu(call_single_queue, cpu))) send_call_function_single_ipi(cpu); } /* * Insert a previously allocated call_single_data_t element * for execution on the given CPU. data must already have * ->func, ->info, and ->flags set. */ static int generic_exec_single(int cpu, call_single_data_t *csd) { if (cpu == smp_processor_id()) { smp_call_func_t func = csd->func; void *info = csd->info; unsigned long flags; /* * We can unlock early even for the synchronous on-stack case, * since we're doing this from the same CPU.. */ csd_lock_record(csd); csd_unlock(csd); local_irq_save(flags); csd_do_func(func, info, NULL); csd_lock_record(NULL); local_irq_restore(flags); return 0; } if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) { csd_unlock(csd); return -ENXIO; } __smp_call_single_queue(cpu, &csd->node.llist); return 0; } /** * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks * * Invoked by arch to handle an IPI for call function single. * Must be called with interrupts disabled. 
*/ void generic_smp_call_function_single_interrupt(void) { __flush_smp_call_function_queue(true); } /** * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks * * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an * offline CPU. Skip this check if set to 'false'. * * Flush any pending smp-call-function callbacks queued on this CPU. This is * invoked by the generic IPI handler, as well as by a CPU about to go offline, * to ensure that all pending IPI callbacks are run before it goes completely * offline. * * Loop through the call_single_queue and run all the queued callbacks. * Must be called with interrupts disabled. */ static void __flush_smp_call_function_queue(bool warn_cpu_offline) { call_single_data_t *csd, *csd_next; struct llist_node *entry, *prev; struct llist_head *head; static bool warned; atomic_t *tbt; lockdep_assert_irqs_disabled(); /* Allow waiters to send backtrace NMI from here onwards */ tbt = this_cpu_ptr(&trigger_backtrace); atomic_set_release(tbt, 1); head = this_cpu_ptr(&call_single_queue); entry = llist_del_all(head); entry = llist_reverse_order(entry); /* There shouldn't be any pending callbacks on an offline CPU. */ if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) && !warned && entry != NULL)) { warned = true; WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); /* * We don't have to use the _safe() variant here * because we are not invoking the IPI handlers yet. */ llist_for_each_entry(csd, entry, node.llist) { switch (CSD_TYPE(csd)) { case CSD_TYPE_ASYNC: case CSD_TYPE_SYNC: case CSD_TYPE_IRQ_WORK: pr_warn("IPI callback %pS sent to offline CPU\n", csd->func); break; case CSD_TYPE_TTWU: pr_warn("IPI task-wakeup sent to offline CPU\n"); break; default: pr_warn("IPI callback, unknown type %d, sent to offline CPU\n", CSD_TYPE(csd)); break; } } } /* * First; run all SYNC callbacks, people are waiting for us. */ prev = NULL; llist_for_each_entry_safe(csd, csd_next, entry, node.llist) { /* Do we wait until *after* callback? */ if (CSD_TYPE(csd) == CSD_TYPE_SYNC) { smp_call_func_t func = csd->func; void *info = csd->info; if (prev) { prev->next = &csd_next->node.llist; } else { entry = &csd_next->node.llist; } csd_lock_record(csd); csd_do_func(func, info, csd); csd_unlock(csd); csd_lock_record(NULL); } else { prev = &csd->node.llist; } } if (!entry) return; /* * Second; run all !SYNC callbacks. */ prev = NULL; llist_for_each_entry_safe(csd, csd_next, entry, node.llist) { int type = CSD_TYPE(csd); if (type != CSD_TYPE_TTWU) { if (prev) { prev->next = &csd_next->node.llist; } else { entry = &csd_next->node.llist; } if (type == CSD_TYPE_ASYNC) { smp_call_func_t func = csd->func; void *info = csd->info; csd_lock_record(csd); csd_unlock(csd); csd_do_func(func, info, csd); csd_lock_record(NULL); } else if (type == CSD_TYPE_IRQ_WORK) { irq_work_single(csd); } } else { prev = &csd->node.llist; } } /* * Third; only CSD_TYPE_TTWU is left, issue those. */ if (entry) { csd = llist_entry(entry, typeof(*csd), node.llist); csd_do_func(sched_ttwu_pending, entry, csd); } } /** * flush_smp_call_function_queue - Flush pending smp-call-function callbacks * from task context (idle, migration thread) * * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to * handle queued SMP function calls before scheduling. 
* * The migration thread has to ensure that an eventually pending wakeup has * been handled before it migrates a task. */ void flush_smp_call_function_queue(void) { unsigned int was_pending; unsigned long flags; if (llist_empty(this_cpu_ptr(&call_single_queue))) return; local_irq_save(flags); /* Get the already pending soft interrupts for RT enabled kernels */ was_pending = local_softirq_pending(); __flush_smp_call_function_queue(true); if (local_softirq_pending()) do_softirq_post_smp_call_flush(was_pending); local_irq_restore(flags); } /* * smp_call_function_single - Run a function on a specific CPU * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. */ int smp_call_function_single(int cpu, smp_call_func_t func, void *info, int wait) { call_single_data_t *csd; call_single_data_t csd_stack = { .node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, }, }; int this_cpu; int err; /* * prevent preemption and reschedule on another processor, * as well as CPU removal */ this_cpu = get_cpu(); /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else can * send smp call function interrupt to this cpu and as such deadlocks * can't happen. */ WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress); /* * When @wait we can deadlock when we interrupt between llist_add() and * arch_send_call_function_ipi*(); when !@wait we can deadlock due to * csd_lock() on because the interrupt context uses the same csd * storage. */ WARN_ON_ONCE(!in_task()); csd = &csd_stack; if (!wait) { csd = this_cpu_ptr(&csd_data); csd_lock(csd); } csd->func = func; csd->info = info; #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG csd->node.src = smp_processor_id(); csd->node.dst = cpu; #endif err = generic_exec_single(cpu, csd); if (wait) csd_lock_wait(csd); put_cpu(); return err; } EXPORT_SYMBOL(smp_call_function_single); /** * smp_call_function_single_async() - Run an asynchronous function on a * specific CPU. * @cpu: The CPU to run on. * @csd: Pre-allocated and setup data structure * * Like smp_call_function_single(), but the call is asynchonous and * can thus be done from contexts with disabled interrupts. * * The caller passes his own pre-allocated data structure * (ie: embedded in an object) and is responsible for synchronizing it * such that the IPIs performed on the @csd are strictly serialized. * * If the function is called with one csd which has not yet been * processed by previous call to smp_call_function_single_async(), the * function will return immediately with -EBUSY showing that the csd * object is still in progress. * * NOTE: Be careful, there is unfortunately no current debugging facility to * validate the correctness of this serialization. * * Return: %0 on success or negative errno value on error */ int smp_call_function_single_async(int cpu, call_single_data_t *csd) { int err = 0; preempt_disable(); if (csd->node.u_flags & CSD_FLAG_LOCK) { err = -EBUSY; goto out; } csd->node.u_flags = CSD_FLAG_LOCK; smp_wmb(); err = generic_exec_single(cpu, csd); out: preempt_enable(); return err; } EXPORT_SYMBOL_GPL(smp_call_function_single_async); /* * smp_call_function_any - Run a function on any of the given cpus * @mask: The mask of cpus it can run on. * @func: The function to run. This must be fast and non-blocking. 
* @info: An arbitrary pointer to pass to the function. * @wait: If true, wait until function has completed. * * Returns 0 on success, else a negative status code (if no cpus were online). * * Selection preference: * 1) current cpu if in @mask * 2) any cpu of current node if in @mask * 3) any other online cpu in @mask */ int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait) { unsigned int cpu; const struct cpumask *nodemask; int ret; /* Try for same CPU (cheapest) */ cpu = get_cpu(); if (cpumask_test_cpu(cpu, mask)) goto call; /* Try for same node. */ nodemask = cpumask_of_node(cpu_to_node(cpu)); for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; cpu = cpumask_next_and(cpu, nodemask, mask)) { if (cpu_online(cpu)) goto call; } /* Any online will do: smp_call_function_single handles nr_cpu_ids. */ cpu = cpumask_any_and(mask, cpu_online_mask); call: ret = smp_call_function_single(cpu, func, info, wait); put_cpu(); return ret; } EXPORT_SYMBOL_GPL(smp_call_function_any); /* * Flags to be used as scf_flags argument of smp_call_function_many_cond(). * * %SCF_WAIT: Wait until function execution is completed * %SCF_RUN_LOCAL: Run also locally if local cpu is set in cpumask */ #define SCF_WAIT (1U << 0) #define SCF_RUN_LOCAL (1U << 1) static void smp_call_function_many_cond(const struct cpumask *mask, smp_call_func_t func, void *info, unsigned int scf_flags, smp_cond_func_t cond_func) { int cpu, last_cpu, this_cpu = smp_processor_id(); struct call_function_data *cfd; bool wait = scf_flags & SCF_WAIT; int nr_cpus = 0; bool run_remote = false; bool run_local = false; lockdep_assert_preemption_disabled(); /* * Can deadlock when called with interrupts disabled. * We allow cpu's that are not yet online though, as no one else can * send smp call function interrupt to this cpu and as such deadlocks * can't happen. */ if (cpu_online(this_cpu) && !oops_in_progress && !early_boot_irqs_disabled) lockdep_assert_irqs_enabled(); /* * When @wait we can deadlock when we interrupt between llist_add() and * arch_send_call_function_ipi*(); when !@wait we can deadlock due to * csd_lock() on because the interrupt context uses the same csd * storage. */ WARN_ON_ONCE(!in_task()); /* Check if we need local execution. */ if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) && (!cond_func || cond_func(this_cpu, info))) run_local = true; /* Check if we need remote execution, i.e., any CPU excluding this one. */ cpu = cpumask_first_and(mask, cpu_online_mask); if (cpu == this_cpu) cpu = cpumask_next_and(cpu, mask, cpu_online_mask); if (cpu < nr_cpu_ids) run_remote = true; if (run_remote) { cfd = this_cpu_ptr(&cfd_data); cpumask_and(cfd->cpumask, mask, cpu_online_mask); __cpumask_clear_cpu(this_cpu, cfd->cpumask); cpumask_clear(cfd->cpumask_ipi); for_each_cpu(cpu, cfd->cpumask) { call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); if (cond_func && !cond_func(cpu, info)) { __cpumask_clear_cpu(cpu, cfd->cpumask); continue; } csd_lock(csd); if (wait) csd->node.u_flags |= CSD_TYPE_SYNC; csd->func = func; csd->info = info; #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG csd->node.src = smp_processor_id(); csd->node.dst = cpu; #endif trace_csd_queue_cpu(cpu, _RET_IP_, func, csd); if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) { __cpumask_set_cpu(cpu, cfd->cpumask_ipi); nr_cpus++; last_cpu = cpu; } } /* * Choose the most efficient way to send an IPI. Note that the * number of CPUs might be zero due to concurrent changes to the * provided mask. 
*/ if (nr_cpus == 1) send_call_function_single_ipi(last_cpu); else if (likely(nr_cpus > 1)) send_call_function_ipi_mask(cfd->cpumask_ipi); } if (run_local) { unsigned long flags; local_irq_save(flags); csd_do_func(func, info, NULL); local_irq_restore(flags); } if (run_remote && wait) { for_each_cpu(cpu, cfd->cpumask) { call_single_data_t *csd; csd = per_cpu_ptr(cfd->csd, cpu); csd_lock_wait(csd); } } } /** * smp_call_function_many(): Run a function on a set of CPUs. * @mask: The set of cpus to run on (only runs on online subset). * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait * (atomically) until function has completed on other CPUs. If * %SCF_RUN_LOCAL is set, the function will also be run locally * if the local CPU is set in the @cpumask. * * If @wait is true, then returns once @func has returned. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. Preemption * must be disabled when calling this function. */ void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) { smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL); } EXPORT_SYMBOL(smp_call_function_many); /** * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @wait: If true, wait (atomically) until function has completed * on other CPUs. * * Returns 0. * * If @wait is true, then returns once @func has returned; otherwise * it returns just before the target cpu calls @func. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ void smp_call_function(smp_call_func_t func, void *info, int wait) { preempt_disable(); smp_call_function_many(cpu_online_mask, func, info, wait); preempt_enable(); } EXPORT_SYMBOL(smp_call_function); /* Setup configured maximum number of CPUs to activate */ unsigned int setup_max_cpus = NR_CPUS; EXPORT_SYMBOL(setup_max_cpus); /* * Setup routine for controlling SMP activation * * Command-line option of "nosmp" or "maxcpus=0" will disable SMP * activation entirely (the MPS table probe still happens, though). * * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer * greater than 0, limits the maximum number of CPUs activated in * SMP mode to <NUM>. 
*/ void __weak __init arch_disable_smp_support(void) { } static int __init nosmp(char *str) { setup_max_cpus = 0; arch_disable_smp_support(); return 0; } early_param("nosmp", nosmp); /* this is hard limit */ static int __init nrcpus(char *str) { int nr_cpus; if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids) set_nr_cpu_ids(nr_cpus); return 0; } early_param("nr_cpus", nrcpus); static int __init maxcpus(char *str) { get_option(&str, &setup_max_cpus); if (setup_max_cpus == 0) arch_disable_smp_support(); return 0; } early_param("maxcpus", maxcpus); #if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS) /* Setup number of possible processor ids */ unsigned int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); #endif /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */ void __init setup_nr_cpu_ids(void) { set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1); } /* Called by boot processor to activate the rest. */ void __init smp_init(void) { int num_nodes, num_cpus; idle_threads_init(); cpuhp_threads_init(); pr_info("Bringing up secondary CPUs ...\n"); bringup_nonboot_cpus(setup_max_cpus); num_nodes = num_online_nodes(); num_cpus = num_online_cpus(); pr_info("Brought up %d node%s, %d CPU%s\n", num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus)); /* Any cleanup work */ smp_cpus_done(setup_max_cpus); } /* * on_each_cpu_cond(): Call a function on each processor for which * the supplied function cond_func returns true, optionally waiting * for all the required CPUs to finish. This may include the local * processor. * @cond_func: A callback function that is passed a cpu id and * the info parameter. The function is called * with preemption disabled. The function should * return a blooean value indicating whether to IPI * the specified CPU. * @func: The function to run on all applicable CPUs. * This must be fast and non-blocking. * @info: An arbitrary pointer to pass to both functions. * @wait: If true, wait (atomically) until function has * completed on other CPUs. * * Preemption is disabled to protect against CPUs going offline but not online. * CPUs going online during the call will not be seen or sent an IPI. * * You must not call this function with disabled interrupts or * from a hardware interrupt handler or from a bottom half handler. */ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait, const struct cpumask *mask) { unsigned int scf_flags = SCF_RUN_LOCAL; if (wait) scf_flags |= SCF_WAIT; preempt_disable(); smp_call_function_many_cond(mask, func, info, scf_flags, cond_func); preempt_enable(); } EXPORT_SYMBOL(on_each_cpu_cond_mask); static void do_nothing(void *unused) { } /** * kick_all_cpus_sync - Force all cpus out of idle * * Used to synchronize the update of pm_idle function pointer. It's * called after the pointer is updated and returns after the dummy * callback function has been executed on all cpus. The execution of * the function can only happen on the remote cpus after they have * left the idle function which had been called via pm_idle function * pointer. So it's guaranteed that nothing uses the previous pointer * anymore. 
*/ void kick_all_cpus_sync(void) { /* Make sure the change is visible before we kick the cpus */ smp_mb(); smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(kick_all_cpus_sync); /** * wake_up_all_idle_cpus - break all cpus out of idle * wake_up_all_idle_cpus try to break all cpus which is in idle state even * including idle polling cpus, for non-idle cpus, we will do nothing * for them. */ void wake_up_all_idle_cpus(void) { int cpu; for_each_possible_cpu(cpu) { preempt_disable(); if (cpu != smp_processor_id() && cpu_online(cpu)) wake_up_if_idle(cpu); preempt_enable(); } } EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus); /** * struct smp_call_on_cpu_struct - Call a function on a specific CPU * @work: &work_struct * @done: &completion to signal * @func: function to call * @data: function's data argument * @ret: return value from @func * @cpu: target CPU (%-1 for any CPU) * * Used to call a function on a specific cpu and wait for it to return. * Optionally make sure the call is done on a specified physical cpu via vcpu * pinning in order to support virtualized environments. */ struct smp_call_on_cpu_struct { struct work_struct work; struct completion done; int (*func)(void *); void *data; int ret; int cpu; }; static void smp_call_on_cpu_callback(struct work_struct *work) { struct smp_call_on_cpu_struct *sscs; sscs = container_of(work, struct smp_call_on_cpu_struct, work); if (sscs->cpu >= 0) hypervisor_pin_vcpu(sscs->cpu); sscs->ret = sscs->func(sscs->data); if (sscs->cpu >= 0) hypervisor_pin_vcpu(-1); complete(&sscs->done); } int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) { struct smp_call_on_cpu_struct sscs = { .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done), .func = func, .data = par, .cpu = phys ? cpu : -1, }; INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback); if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; queue_work_on(cpu, system_wq, &sscs.work); wait_for_completion(&sscs.done); destroy_work_on_stack(&sscs.work); return sscs.ret; } EXPORT_SYMBOL_GPL(smp_call_on_cpu); |
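/*
 * Editor's note: a minimal usage sketch, not part of kernel/smp.c. It only
 * illustrates the cross-call interfaces documented above; the demo_* names
 * are hypothetical, while smp_call_function_single() and
 * on_each_cpu_cond_mask() are the functions defined in this file.
 */
#if 0	/* illustration only, never compiled */
static void demo_bump_counter(void *info)
{
	atomic_inc(info);		/* runs on the target CPU with IRQs off */
}

static bool demo_cpu_is_odd(int cpu, void *info)
{
	return cpu & 1;			/* cond_func: only IPI odd-numbered CPUs */
}

static void demo_cross_calls(void)
{
	static atomic_t hits = ATOMIC_INIT(0);

	/* Run the callback on CPU 1 and wait for it to complete. */
	if (smp_call_function_single(1, demo_bump_counter, &hits, 1))
		pr_warn("demo: CPU 1 not online\n");

	/* Run it on every online odd-numbered CPU in the mask, waiting. */
	on_each_cpu_cond_mask(demo_cpu_is_odd, demo_bump_counter, &hits,
			      true, cpu_online_mask);
}
#endif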
2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 | // SPDX-License-Identifier: GPL-2.0-or-later #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <linux/usb/uvc.h> #include <linux/videodev2.h> /* ------------------------------------------------------------------------ * Video formats */ static const struct uvc_format_desc uvc_fmts[] = { { .guid = UVC_GUID_FORMAT_YUY2, .fcc = V4L2_PIX_FMT_YUYV, }, { .guid = UVC_GUID_FORMAT_YUY2_ISIGHT, .fcc = V4L2_PIX_FMT_YUYV, }, { .guid = UVC_GUID_FORMAT_NV12, .fcc = V4L2_PIX_FMT_NV12, }, { .guid = UVC_GUID_FORMAT_MJPEG, .fcc = V4L2_PIX_FMT_MJPEG, }, { .guid = UVC_GUID_FORMAT_YV12, .fcc = V4L2_PIX_FMT_YVU420, }, { .guid = UVC_GUID_FORMAT_I420, .fcc = V4L2_PIX_FMT_YUV420, }, { .guid = UVC_GUID_FORMAT_M420, .fcc = V4L2_PIX_FMT_M420, }, { .guid = UVC_GUID_FORMAT_UYVY, .fcc = V4L2_PIX_FMT_UYVY, }, { .guid = UVC_GUID_FORMAT_Y800, .fcc = V4L2_PIX_FMT_GREY, }, { .guid = UVC_GUID_FORMAT_Y8, .fcc = V4L2_PIX_FMT_GREY, }, { .guid = UVC_GUID_FORMAT_D3DFMT_L8, .fcc = V4L2_PIX_FMT_GREY, }, { .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR, .fcc = V4L2_PIX_FMT_GREY, }, { .guid = UVC_GUID_FORMAT_Y10, .fcc = V4L2_PIX_FMT_Y10, }, { .guid = UVC_GUID_FORMAT_Y12, .fcc = V4L2_PIX_FMT_Y12, }, { .guid = UVC_GUID_FORMAT_Y16, .fcc = V4L2_PIX_FMT_Y16, }, { .guid = UVC_GUID_FORMAT_BY8, .fcc = V4L2_PIX_FMT_SBGGR8, }, { .guid = UVC_GUID_FORMAT_BA81, .fcc = V4L2_PIX_FMT_SBGGR8, }, { .guid = UVC_GUID_FORMAT_GBRG, .fcc = V4L2_PIX_FMT_SGBRG8, }, { .guid = UVC_GUID_FORMAT_GRBG, .fcc = V4L2_PIX_FMT_SGRBG8, }, { .guid = UVC_GUID_FORMAT_RGGB, .fcc = V4L2_PIX_FMT_SRGGB8, }, { .guid = UVC_GUID_FORMAT_RGBP, .fcc = V4L2_PIX_FMT_RGB565, }, { .guid = UVC_GUID_FORMAT_D3DFMT_R5G6B5, .fcc = V4L2_PIX_FMT_RGB565, }, { .guid = UVC_GUID_FORMAT_BGR3, .fcc = V4L2_PIX_FMT_BGR24, }, { .guid = UVC_GUID_FORMAT_BGR4, .fcc = V4L2_PIX_FMT_XBGR32, }, { .guid = UVC_GUID_FORMAT_H264, .fcc = V4L2_PIX_FMT_H264, }, { .guid = UVC_GUID_FORMAT_H265, .fcc = V4L2_PIX_FMT_HEVC, }, { .guid = UVC_GUID_FORMAT_Y8I, .fcc = V4L2_PIX_FMT_Y8I, }, { .guid = UVC_GUID_FORMAT_Y12I, .fcc = V4L2_PIX_FMT_Y12I, }, { .guid = UVC_GUID_FORMAT_Y16I, .fcc = V4L2_PIX_FMT_Y16I, }, { .guid = UVC_GUID_FORMAT_Z16, .fcc = V4L2_PIX_FMT_Z16, }, { .guid = UVC_GUID_FORMAT_RW10, .fcc = V4L2_PIX_FMT_SRGGB10P, }, { .guid = UVC_GUID_FORMAT_BG16, .fcc = V4L2_PIX_FMT_SBGGR16, }, { .guid = UVC_GUID_FORMAT_GB16, .fcc = V4L2_PIX_FMT_SGBRG16, }, { .guid = UVC_GUID_FORMAT_RG16, .fcc = V4L2_PIX_FMT_SRGGB16, }, { .guid = UVC_GUID_FORMAT_GR16, .fcc = V4L2_PIX_FMT_SGRBG16, }, { .guid = UVC_GUID_FORMAT_INVZ, .fcc = V4L2_PIX_FMT_Z16, }, { .guid = UVC_GUID_FORMAT_INVI, .fcc = V4L2_PIX_FMT_Y10, }, { .guid = UVC_GUID_FORMAT_INZI, .fcc = V4L2_PIX_FMT_INZI, }, { .guid = UVC_GUID_FORMAT_CNF4, .fcc = V4L2_PIX_FMT_CNF4, }, { .guid = UVC_GUID_FORMAT_HEVC, .fcc = V4L2_PIX_FMT_HEVC, }, }; const struct uvc_format_desc 
*uvc_format_by_guid(const u8 guid[16]) { unsigned int len = ARRAY_SIZE(uvc_fmts); unsigned int i; for (i = 0; i < len; ++i) { if (memcmp(guid, uvc_fmts[i].guid, 16) == 0) return &uvc_fmts[i]; } return NULL; } EXPORT_SYMBOL_GPL(uvc_format_by_guid); MODULE_DESCRIPTION("USB Video Class common code"); MODULE_LICENSE("GPL"); |
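/*
 * Editor's note: a minimal usage sketch, not part of this file. A UVC driver
 * that has parsed a format descriptor could translate its 16-byte GUID into a
 * V4L2 fourcc with uvc_format_by_guid(); the demo_* name is hypothetical.
 */
#if 0	/* illustration only, never compiled */
static u32 demo_guid_to_fourcc(const u8 guid[16])
{
	const struct uvc_format_desc *fmt = uvc_format_by_guid(guid);

	/* NULL means the GUID is not in uvc_fmts[]; 0 is not a valid fourcc. */
	return fmt ? fmt->fcc : 0;
}
#endif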
100 100 100 59 59 59 59 59 100 99 100 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 | // SPDX-License-Identifier: GPL-2.0-only /* * Media device node * * Copyright (C) 2010 Nokia Corporation * * Based on drivers/media/video/v4l2_dev.c code authored by * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2) * Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1) * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * -- * * Generic media device node infrastructure to register and unregister * character devices using a dynamic major number and proper reference * counting. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> #include <media/media-devnode.h> #include <media/media-device.h> #define MEDIA_NUM_DEVICES 256 #define MEDIA_NAME "media" static dev_t media_dev_t; /* * Active devices */ static DEFINE_MUTEX(media_devnode_lock); static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES); /* Called when the last user of the media device exits. */ static void media_devnode_release(struct device *cd) { struct media_devnode *devnode = to_media_devnode(cd); mutex_lock(&media_devnode_lock); /* Mark device node number as free */ clear_bit(devnode->minor, media_devnode_nums); mutex_unlock(&media_devnode_lock); /* Release media_devnode and perform other cleanups as needed. 
*/ if (devnode->release) devnode->release(devnode); kfree(devnode); pr_debug("%s: Media Devnode Deallocated\n", __func__); } static const struct bus_type media_bus_type = { .name = MEDIA_NAME, }; static ssize_t media_read(struct file *filp, char __user *buf, size_t sz, loff_t *off) { struct media_devnode *devnode = media_devnode_data(filp); if (!devnode->fops->read) return -EINVAL; if (!media_devnode_is_registered(devnode)) return -EIO; return devnode->fops->read(filp, buf, sz, off); } static ssize_t media_write(struct file *filp, const char __user *buf, size_t sz, loff_t *off) { struct media_devnode *devnode = media_devnode_data(filp); if (!devnode->fops->write) return -EINVAL; if (!media_devnode_is_registered(devnode)) return -EIO; return devnode->fops->write(filp, buf, sz, off); } static __poll_t media_poll(struct file *filp, struct poll_table_struct *poll) { struct media_devnode *devnode = media_devnode_data(filp); if (!media_devnode_is_registered(devnode)) return EPOLLERR | EPOLLHUP; if (!devnode->fops->poll) return DEFAULT_POLLMASK; return devnode->fops->poll(filp, poll); } static long __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg, long (*ioctl_func)(struct file *filp, unsigned int cmd, unsigned long arg)) { struct media_devnode *devnode = media_devnode_data(filp); if (!ioctl_func) return -ENOTTY; if (!media_devnode_is_registered(devnode)) return -EIO; return ioctl_func(filp, cmd, arg); } static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_devnode *devnode = media_devnode_data(filp); return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl); } #ifdef CONFIG_COMPAT static long media_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_devnode *devnode = media_devnode_data(filp); return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl); } #endif /* CONFIG_COMPAT */ /* Override for the open function */ static int media_open(struct inode *inode, struct file *filp) { struct media_devnode *devnode; int ret; /* Check if the media device is available. This needs to be done with * the media_devnode_lock held to prevent an open/unregister race: * without the lock, the device could be unregistered and freed between * the media_devnode_is_registered() and get_device() calls, leading to * a crash. */ mutex_lock(&media_devnode_lock); devnode = container_of(inode->i_cdev, struct media_devnode, cdev); /* return ENXIO if the media device has been removed already or if it is not registered anymore. */ if (!media_devnode_is_registered(devnode)) { mutex_unlock(&media_devnode_lock); return -ENXIO; } /* and increase the device refcount */ get_device(&devnode->dev); mutex_unlock(&media_devnode_lock); filp->private_data = devnode; if (devnode->fops->open) { ret = devnode->fops->open(filp); if (ret) { put_device(&devnode->dev); filp->private_data = NULL; return ret; } } return 0; } /* Override for the release function */ static int media_release(struct inode *inode, struct file *filp) { struct media_devnode *devnode = media_devnode_data(filp); if (devnode->fops->release) devnode->fops->release(filp); filp->private_data = NULL; /* decrease the refcount unconditionally since the release() return value is ignored. 
*/ put_device(&devnode->dev); return 0; } static const struct file_operations media_devnode_fops = { .owner = THIS_MODULE, .read = media_read, .write = media_write, .open = media_open, .unlocked_ioctl = media_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = media_compat_ioctl, #endif /* CONFIG_COMPAT */ .release = media_release, .poll = media_poll, }; int __must_check media_devnode_register(struct media_device *mdev, struct media_devnode *devnode, struct module *owner) { int minor; int ret; /* Part 1: Find a free minor number */ mutex_lock(&media_devnode_lock); minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES); if (minor == MEDIA_NUM_DEVICES) { mutex_unlock(&media_devnode_lock); pr_err("could not get a free minor\n"); kfree(devnode); return -ENFILE; } set_bit(minor, media_devnode_nums); mutex_unlock(&media_devnode_lock); devnode->minor = minor; devnode->media_dev = mdev; /* Part 1: Initialize dev now to use dev.kobj for cdev.kobj.parent */ devnode->dev.bus = &media_bus_type; devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor); devnode->dev.release = media_devnode_release; if (devnode->parent) devnode->dev.parent = devnode->parent; dev_set_name(&devnode->dev, "media%d", devnode->minor); device_initialize(&devnode->dev); /* Part 2: Initialize the character device */ cdev_init(&devnode->cdev, &media_devnode_fops); devnode->cdev.owner = owner; kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor); /* Part 3: Add the media and char device */ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); ret = cdev_device_add(&devnode->cdev, &devnode->dev); if (ret < 0) { clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); pr_err("%s: cdev_device_add failed\n", __func__); goto cdev_add_error; } return 0; cdev_add_error: mutex_lock(&media_devnode_lock); clear_bit(devnode->minor, media_devnode_nums); devnode->media_dev = NULL; mutex_unlock(&media_devnode_lock); put_device(&devnode->dev); return ret; } void media_devnode_unregister_prepare(struct media_devnode *devnode) { /* Check if devnode was ever registered at all */ if (!media_devnode_is_registered(devnode)) return; mutex_lock(&media_devnode_lock); clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); mutex_unlock(&media_devnode_lock); } void media_devnode_unregister(struct media_devnode *devnode) { mutex_lock(&media_devnode_lock); /* Delete the cdev on this minor as well */ cdev_device_del(&devnode->cdev, &devnode->dev); devnode->media_dev = NULL; mutex_unlock(&media_devnode_lock); put_device(&devnode->dev); } /* * Initialise media for linux */ static int __init media_devnode_init(void) { int ret; pr_info("Linux media interface: v0.10\n"); ret = alloc_chrdev_region(&media_dev_t, 0, MEDIA_NUM_DEVICES, MEDIA_NAME); if (ret < 0) { pr_warn("unable to allocate major\n"); return ret; } ret = bus_register(&media_bus_type); if (ret < 0) { unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES); pr_warn("bus_register failed\n"); return -EIO; } return 0; } static void __exit media_devnode_exit(void) { bus_unregister(&media_bus_type); unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES); } subsys_initcall(media_devnode_init); module_exit(media_devnode_exit) MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); MODULE_DESCRIPTION("Device node registration for media drivers"); MODULE_LICENSE("GPL"); |
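/*
 * Editor's note: a minimal lifecycle sketch, not part of this file. In-tree
 * users normally reach this code through media_device_register(); the demo_*
 * names are hypothetical and only illustrate the sequence implemented above:
 * media_devnode_register() -> media_devnode_unregister_prepare() ->
 * media_devnode_unregister(). The fops and release members come from
 * struct media_devnode in media-devnode.h.
 */
#if 0	/* illustration only, never compiled */
static void demo_devnode_release(struct media_devnode *devnode)
{
	/* Called from media_devnode_release() once the last reference drops. */
}

static int demo_register(struct media_device *mdev,
			 struct media_devnode *devnode,
			 const struct media_file_operations *fops)
{
	/* devnode must be kzalloc()ed: the core kfree()s it on final release. */
	devnode->fops = fops;
	devnode->release = demo_devnode_release;

	return media_devnode_register(mdev, devnode, THIS_MODULE);
}

static void demo_unregister(struct media_devnode *devnode)
{
	media_devnode_unregister_prepare(devnode);	/* block new opens */
	media_devnode_unregister(devnode);		/* delete cdev, drop ref */
}
#endif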
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_SMP_H #define _ASM_X86_SMP_H #ifndef __ASSEMBLER__ #include <linux/cpumask.h> #include <linux/thread_info.h> #include <asm/cpumask.h> DECLARE_PER_CPU_CACHE_HOT(int, cpu_number); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map); /* cpus sharing the last level cache: */ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map); DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid); struct task_struct; struct smp_ops { void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_cpus)(unsigned max_cpus); void (*smp_cpus_done)(unsigned max_cpus); void (*stop_other_cpus)(int wait); void (*crash_stop_other_cpus)(void); void (*smp_send_reschedule)(int cpu); void (*cleanup_dead_cpu)(unsigned cpu); void (*poll_sync_state)(void); int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle); int (*cpu_disable)(void); void (*cpu_die)(unsigned int cpu); void (*play_dead)(void); void (*stop_this_cpu)(void); void (*send_call_func_ipi)(const struct cpumask *mask); void (*send_call_func_single_ipi)(int cpu); }; /* Globals due to paravirt */ extern void set_cpu_sibling_map(int cpu); #ifdef CONFIG_SMP extern struct smp_ops smp_ops; static inline void smp_send_stop(void) { smp_ops.stop_other_cpus(0); } static inline void stop_other_cpus(void) { smp_ops.stop_other_cpus(1); } static inline void smp_prepare_cpus(unsigned int max_cpus) { smp_ops.smp_prepare_cpus(max_cpus); } static inline void smp_cpus_done(unsigned int max_cpus) { smp_ops.smp_cpus_done(max_cpus); } static inline int __cpu_disable(void) { return smp_ops.cpu_disable(); } static inline void __cpu_die(unsigned int cpu) { if (smp_ops.cpu_die) smp_ops.cpu_die(cpu); } static inline void __noreturn play_dead(void) { smp_ops.play_dead(); BUG(); } static inline void arch_smp_send_reschedule(int cpu) { smp_ops.smp_send_reschedule(cpu); } static inline void arch_send_call_function_single_ipi(int cpu) { smp_ops.send_call_func_single_ipi(cpu); } static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) { smp_ops.send_call_func_ipi(mask); } void cpu_disable_common(void); void native_smp_prepare_boot_cpu(void); void smp_prepare_cpus_common(void); void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus); int common_cpu_up(unsigned int cpunum, struct task_struct *tidle); int native_kick_ap(unsigned int cpu, struct task_struct *tidle); int native_cpu_disable(void); void __noreturn hlt_play_dead(void); void native_play_dead(void); void play_dead_common(void); void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); void smp_kick_mwait_play_dead(void); void __noreturn 
mwait_play_dead(unsigned int eax_hint); void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); void native_send_call_func_single_ipi(int cpu); asmlinkage __visible void smp_reboot_interrupt(void); __visible void smp_reschedule_interrupt(struct pt_regs *regs); __visible void smp_call_function_interrupt(struct pt_regs *regs); __visible void smp_call_function_single_interrupt(struct pt_regs *r); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu) /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. */ #define raw_smp_processor_id() this_cpu_read(cpu_number) #define __smp_processor_id() __this_cpu_read(cpu_number) static inline struct cpumask *cpu_llc_shared_mask(int cpu) { return per_cpu(cpu_llc_shared_map, cpu); } static inline struct cpumask *cpu_l2c_shared_mask(int cpu) { return per_cpu(cpu_l2c_shared_map, cpu); } #else /* !CONFIG_SMP */ #define wbinvd_on_cpu(cpu) wbinvd() static inline int wbinvd_on_all_cpus(void) { wbinvd(); return 0; } static inline struct cpumask *cpu_llc_shared_mask(int cpu) { return (struct cpumask *)cpumask_of(0); } static inline void __noreturn mwait_play_dead(unsigned int eax_hint) { BUG(); } #endif /* CONFIG_SMP */ #ifdef CONFIG_DEBUG_NMI_SELFTEST extern void nmi_selftest(void); #else #define nmi_selftest() do { } while (0) #endif extern unsigned int smpboot_control; extern unsigned long apic_mmio_base; #endif /* !__ASSEMBLER__ */ /* Control bits for startup_64 */ #define STARTUP_READ_APICID 0x80000000 /* Top 8 bits are reserved for control */ #define STARTUP_PARALLEL_MASK 0xFF000000 #endif /* _ASM_X86_SMP_H */ |
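/*
 * Editor's note: a minimal sketch, not part of this header, of why smp_ops is
 * a writable global ("Globals due to paravirt" above): a hypervisor backend
 * can patch individual callbacks. The demo_* names are hypothetical.
 */
#if 0	/* illustration only, never compiled */
static void demo_pv_send_reschedule(int cpu)
{
	/* A paravirt guest could issue a hypercall here instead of the APIC IPI. */
}

static void __init demo_pv_patch_smp_ops(void)
{
	smp_ops.smp_send_reschedule = demo_pv_send_reschedule;
	/* Members left untouched keep whatever the platform code installed. */
}
#endif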
// SPDX-License-Identifier: GPL-2.0

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "printk_ringbuffer.h"
#include "internal.h"

/**
 * DOC: printk_ringbuffer overview
 *
 * Data Structure
 * --------------
 * The printk_ringbuffer is made up of 2 internal ringbuffers:
 *
 *   desc_ring
 *     A ring of descriptors and their meta data (such as sequence number,
 *     timestamp, loglevel, etc.) as well as internal state information about
 *     the record and logical positions specifying where in the other
 *     ringbuffer the text strings are located.
 *
 *   text_data_ring
 *     A ring of data blocks. A data block consists of an unsigned long
 *     integer (ID) that maps to a desc_ring index followed by the text
 *     string of the record.
 *
 * The internal state information of a descriptor is the key element to allow
 * readers and writers to locklessly synchronize access to the data.
 *
 * Implementation
 * --------------
 *
 * Descriptor Ring
 * ~~~~~~~~~~~~~~~
 * The descriptor ring is an array of descriptors. A descriptor contains
 * essential meta data to track the data of a printk record using
 * blk_lpos structs pointing to associated text data blocks (see
 * "Data Ring" below). Each descriptor is assigned an ID that maps
 * directly to index values of the descriptor array and has a state. The ID
 * and the state are bitwise combined into a single descriptor field named
 * @state_var, allowing ID and state to be synchronously and atomically
 * updated.
 *
 * Descriptors have four states:
 *
 *   reserved
 *     A writer is modifying the record.
 *
 *   committed
 *     The record and all its data are written. A writer can reopen the
 *     descriptor (transitioning it back to reserved), but in the committed
 *     state the data is consistent.
 *
 *   finalized
 *     The record and all its data are complete and available for reading. A
 *     writer cannot reopen the descriptor.
 *
 *   reusable
 *     The record exists, but its text and/or meta data may no longer be
 *     available.
 *
 * Querying the @state_var of a record requires providing the ID of the
 * descriptor to query. This can yield a possible fifth (pseudo) state:
 *
 *   miss
 *     The descriptor being queried has an unexpected ID.
 *
 * The descriptor ring has a @tail_id that contains the ID of the oldest
 * descriptor and @head_id that contains the ID of the newest descriptor.
 *
 * When a new descriptor should be created (and the ring is full), the tail
 * descriptor is invalidated by first transitioning to the reusable state and
 * then invalidating all tail data blocks up to and including the data blocks
 * associated with the tail descriptor (for the text ring). Then
 * @tail_id is advanced, followed by advancing @head_id. And finally the
 * @state_var of the new descriptor is initialized to the new ID and reserved
 * state.
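 *
 * As an illustrative sketch only (the DESC_SV(), DESC_ID() and DESC_STATE()
 * helpers come from printk_ringbuffer.h and are used by the functions further
 * below; this is not additional API), querying and transitioning the combined
 * ID/state value of a descriptor could look like::
 *
 *	// load the combined ID/state value once
 *	unsigned long state_val = atomic_long_read(&desc->state_var);
 *
 *	// a mismatched ID means this descriptor slot has been recycled
 *	if (DESC_ID(state_val) != id)
 *		return desc_miss;
 *
 *	// atomically move this ID from reserved to committed, and only
 *	// if no other state transition happened in between
 *	atomic_long_cmpxchg(&desc->state_var, DESC_SV(id, desc_reserved),
 *			    DESC_SV(id, desc_committed));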
* * The @tail_id can only be advanced if the new @tail_id would be in the * committed or reusable queried state. This makes it possible that a valid * sequence number of the tail is always available. * * Descriptor Finalization * ~~~~~~~~~~~~~~~~~~~~~~~ * When a writer calls the commit function prb_commit(), record data is * fully stored and is consistent within the ringbuffer. However, a writer can * reopen that record, claiming exclusive access (as with prb_reserve()), and * modify that record. When finished, the writer must again commit the record. * * In order for a record to be made available to readers (and also become * recyclable for writers), it must be finalized. A finalized record cannot be * reopened and can never become "unfinalized". Record finalization can occur * in three different scenarios: * * 1) A writer can simultaneously commit and finalize its record by calling * prb_final_commit() instead of prb_commit(). * * 2) When a new record is reserved and the previous record has been * committed via prb_commit(), that previous record is automatically * finalized. * * 3) When a record is committed via prb_commit() and a newer record * already exists, the record being committed is automatically finalized. * * Data Ring * ~~~~~~~~~ * The text data ring is a byte array composed of data blocks. Data blocks are * referenced by blk_lpos structs that point to the logical position of the * beginning of a data block and the beginning of the next adjacent data * block. Logical positions are mapped directly to index values of the byte * array ringbuffer. * * Each data block consists of an ID followed by the writer data. The ID is * the identifier of a descriptor that is associated with the data block. A * given data block is considered valid if all of the following conditions * are met: * * 1) The descriptor associated with the data block is in the committed * or finalized queried state. * * 2) The blk_lpos struct within the descriptor associated with the data * block references back to the same data block. * * 3) The data block is within the head/tail logical position range. * * If the writer data of a data block would extend beyond the end of the * byte array, only the ID of the data block is stored at the logical * position and the full data block (ID and writer data) is stored at the * beginning of the byte array. The referencing blk_lpos will point to the * ID before the wrap and the next data block will be at the logical * position adjacent the full data block after the wrap. * * Data rings have a @tail_lpos that points to the beginning of the oldest * data block and a @head_lpos that points to the logical position of the * next (not yet existing) data block. * * When a new data block should be created (and the ring is full), tail data * blocks will first be invalidated by putting their associated descriptors * into the reusable state and then pushing the @tail_lpos forward beyond * them. Then the @head_lpos is pushed forward and is associated with a new * descriptor. If a data block is not valid, the @tail_lpos cannot be * advanced beyond it. * * Info Array * ~~~~~~~~~~ * The general meta data of printk records are stored in printk_info structs, * stored in an array with the same number of elements as the descriptor ring. * Each info corresponds to the descriptor of the same index in the * descriptor ring. Info validity is confirmed by evaluating the corresponding * descriptor before and after loading the info. 
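 *
 * As a sketch only (DATA_WRAPS(), to_block() and struct prb_data_block are
 * defined further below; get_data() is the authoritative reader-side
 * implementation), locating the writer data described by a blk_lpos follows
 * the wrap rule of the Data Ring section above::
 *
 *	struct prb_data_block *db;
 *
 *	if (DATA_WRAPS(data_ring, blk_lpos->begin) ==
 *	    DATA_WRAPS(data_ring, blk_lpos->next)) {
 *		// the block does not wrap: ID and data sit at @begin
 *		db = to_block(data_ring, blk_lpos->begin);
 *	} else {
 *		// wrapping block: only the ID sits at @begin, the full
 *		// block (ID and data) was stored at the start of the array
 *		db = to_block(data_ring, 0);
 *	}
 *	return &db->data[0];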
* * Usage * ----- * Here are some simple examples demonstrating writers and readers. For the * examples a global ringbuffer (test_rb) is available (which is not the * actual ringbuffer used by printk):: * * DEFINE_PRINTKRB(test_rb, 15, 5); * * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of * 1 MiB (2 ^ (15 + 5)) for text data. * * Sample writer code:: * * const char *textstr = "message text"; * struct prb_reserved_entry e; * struct printk_record r; * * // specify how much to allocate * prb_rec_init_wr(&r, strlen(textstr) + 1); * * if (prb_reserve(&e, &test_rb, &r)) { * snprintf(r.text_buf, r.text_buf_size, "%s", textstr); * * r.info->text_len = strlen(textstr); * r.info->ts_nsec = local_clock(); * r.info->caller_id = printk_caller_id(); * * // commit and finalize the record * prb_final_commit(&e); * } * * Note that additional writer functions are available to extend a record * after it has been committed but not yet finalized. This can be done as * long as no new records have been reserved and the caller is the same. * * Sample writer code (record extending):: * * // alternate rest of previous example * * r.info->text_len = strlen(textstr); * r.info->ts_nsec = local_clock(); * r.info->caller_id = printk_caller_id(); * * // commit the record (but do not finalize yet) * prb_commit(&e); * } * * ... * * // specify additional 5 bytes text space to extend * prb_rec_init_wr(&r, 5); * * // try to extend, but only if it does not exceed 32 bytes * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id(), 32)) { * snprintf(&r.text_buf[r.info->text_len], * r.text_buf_size - r.info->text_len, "hello"); * * r.info->text_len += 5; * * // commit and finalize the record * prb_final_commit(&e); * } * * Sample reader code:: * * struct printk_info info; * struct printk_record r; * char text_buf[32]; * u64 seq; * * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf)); * * prb_for_each_record(0, &test_rb, &seq, &r) { * if (info.seq != seq) * pr_warn("lost %llu records\n", info.seq - seq); * * if (info.text_len > r.text_buf_size) { * pr_warn("record %llu text truncated\n", info.seq); * text_buf[r.text_buf_size - 1] = 0; * } * * pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec, * &text_buf[0]); * } * * Note that additional less convenient reader functions are available to * allow complex record access. * * ABA Issues * ~~~~~~~~~~ * To help avoid ABA issues, descriptors are referenced by IDs (array index * values combined with tagged bits counting array wraps) and data blocks are * referenced by logical positions (array index values combined with tagged * bits counting array wraps). However, on 32-bit systems the number of * tagged bits is relatively small such that an ABA incident is (at least * theoretically) possible. For example, if 4 million maximally sized (1KiB) * printk messages were to occur in NMI context on a 32-bit system, the * interrupted context would not be able to recognize that the 32-bit integer * completely wrapped and thus represents a different data block than the one * the interrupted context expects. * * To help combat this possibility, additional state checking is performed * (such as using cmpxchg() even though set() would suffice). These extra * checks are commented as such and will hopefully catch any ABA issue that * a 32-bit system might experience. * * Memory Barriers * ~~~~~~~~~~~~~~~ * Multiple memory barriers are used. 
To simplify proving correctness and * generating litmus tests, lines of code related to memory barriers * (loads, stores, and the associated memory barriers) are labeled:: * * LMM(function:letter) * * Comments reference the labels using only the "function:letter" part. * * The memory barrier pairs and their ordering are: * * desc_reserve:D / desc_reserve:B * push descriptor tail (id), then push descriptor head (id) * * desc_reserve:D / data_push_tail:B * push data tail (lpos), then set new descriptor reserved (state) * * desc_reserve:D / desc_push_tail:C * push descriptor tail (id), then set new descriptor reserved (state) * * desc_reserve:D / prb_first_seq:C * push descriptor tail (id), then set new descriptor reserved (state) * * desc_reserve:F / desc_read:D * set new descriptor id and reserved (state), then allow writer changes * * data_alloc:A (or data_realloc:A) / desc_read:D * set old descriptor reusable (state), then modify new data block area * * data_alloc:A (or data_realloc:A) / data_push_tail:B * push data tail (lpos), then modify new data block area * * _prb_commit:B / desc_read:B * store writer changes, then set new descriptor committed (state) * * desc_reopen_last:A / _prb_commit:B * set descriptor reserved (state), then read descriptor data * * _prb_commit:B / desc_reserve:D * set new descriptor committed (state), then check descriptor head (id) * * data_push_tail:D / data_push_tail:A * set descriptor reusable (state), then push data tail (lpos) * * desc_push_tail:B / desc_reserve:D * set descriptor reusable (state), then push descriptor tail (id) * * desc_update_last_finalized:A / desc_last_finalized_seq:A * store finalized record, then set new highest finalized sequence number */ #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits) #define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1) #define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits) #define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1) /* Determine the data array index from a logical position. */ #define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring)) /* Determine the desc array index from an ID or sequence number. */ #define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring)) /* Determine how many times the data array has wrapped. */ #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits) /* Determine if a logical position refers to a data-less block. */ #define LPOS_DATALESS(lpos) ((lpos) & 1UL) #define BLK_DATALESS(blk) (LPOS_DATALESS((blk)->begin) && \ LPOS_DATALESS((blk)->next)) /* Get the logical position at index 0 of the current wrap. */ #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \ ((lpos) & ~DATA_SIZE_MASK(data_ring)) /* Get the ID for the same index of the previous wrap as the given ID. */ #define DESC_ID_PREV_WRAP(desc_ring, id) \ DESC_ID((id) - DESCS_COUNT(desc_ring)) /* * A data block: mapped directly to the beginning of the data block area * specified as a logical position within the data ring. * * @id: the ID of the associated descriptor * @data: the writer data * * Note that the size of a data block is only known by its associated * descriptor. */ struct prb_data_block { unsigned long id; char data[]; }; /* * Return the descriptor associated with @n. @n can be either a * descriptor ID or a sequence number. */ static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n) { return &desc_ring->descs[DESC_INDEX(desc_ring, n)]; } /* * Return the printk_info associated with @n. 
@n can be either a * descriptor ID or a sequence number. */ static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n) { return &desc_ring->infos[DESC_INDEX(desc_ring, n)]; } static struct prb_data_block *to_block(struct prb_data_ring *data_ring, unsigned long begin_lpos) { return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)]; } /* * Increase the data size to account for data block meta data plus any * padding so that the adjacent data block is aligned on the ID size. */ static unsigned int to_blk_size(unsigned int size) { struct prb_data_block *db = NULL; size += sizeof(*db); size = ALIGN(size, sizeof(db->id)); return size; } /* * Sanity checker for reserve size. The ringbuffer code assumes that a data * block does not exceed the maximum possible size that could fit within the * ringbuffer. This function provides that basic size check so that the * assumption is safe. */ static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size) { struct prb_data_block *db = NULL; if (size == 0) return true; /* * Ensure the alignment padded size could possibly fit in the data * array. The largest possible data block must still leave room for * at least the ID of the next block. */ size = to_blk_size(size); if (size > DATA_SIZE(data_ring) - sizeof(db->id)) return false; return true; } /* Query the state of a descriptor. */ static enum desc_state get_desc_state(unsigned long id, unsigned long state_val) { if (id != DESC_ID(state_val)) return desc_miss; return DESC_STATE(state_val); } /* * Get a copy of a specified descriptor and return its queried state. If the * descriptor is in an inconsistent state (miss or reserved), the caller can * only expect the descriptor's @state_var field to be valid. * * The sequence number and caller_id can be optionally retrieved. Like all * non-state_var data, they are only valid if the descriptor is in a * consistent state. */ static enum desc_state desc_read(struct prb_desc_ring *desc_ring, unsigned long id, struct prb_desc *desc_out, u64 *seq_out, u32 *caller_id_out) { struct printk_info *info = to_info(desc_ring, id); struct prb_desc *desc = to_desc(desc_ring, id); atomic_long_t *state_var = &desc->state_var; enum desc_state d_state; unsigned long state_val; /* Check the descriptor state. */ state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */ d_state = get_desc_state(id, state_val); if (d_state == desc_miss || d_state == desc_reserved) { /* * The descriptor is in an inconsistent state. Set at least * @state_var so that the caller can see the details of * the inconsistent state. */ goto out; } /* * Guarantee the state is loaded before copying the descriptor * content. This avoids copying obsolete descriptor content that might * not apply to the descriptor state. This pairs with _prb_commit:B. * * Memory barrier involvement: * * If desc_read:A reads from _prb_commit:B, then desc_read:C reads * from _prb_commit:A. * * Relies on: * * WMB from _prb_commit:A to _prb_commit:B * matching * RMB from desc_read:A to desc_read:C */ smp_rmb(); /* LMM(desc_read:B) */ /* * Copy the descriptor data. The data is not valid until the * state has been re-checked. A memcpy() for all of @desc * cannot be used because of the atomic_t @state_var field. 
*/ if (desc_out) { memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos, sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */ } if (seq_out) *seq_out = info->seq; /* also part of desc_read:C */ if (caller_id_out) *caller_id_out = info->caller_id; /* also part of desc_read:C */ /* * 1. Guarantee the descriptor content is loaded before re-checking * the state. This avoids reading an obsolete descriptor state * that may not apply to the copied content. This pairs with * desc_reserve:F. * * Memory barrier involvement: * * If desc_read:C reads from desc_reserve:G, then desc_read:E * reads from desc_reserve:F. * * Relies on: * * WMB from desc_reserve:F to desc_reserve:G * matching * RMB from desc_read:C to desc_read:E * * 2. Guarantee the record data is loaded before re-checking the * state. This avoids reading an obsolete descriptor state that may * not apply to the copied data. This pairs with data_alloc:A and * data_realloc:A. * * Memory barrier involvement: * * If copy_data:A reads from data_alloc:B, then desc_read:E * reads from desc_make_reusable:A. * * Relies on: * * MB from desc_make_reusable:A to data_alloc:B * matching * RMB from desc_read:C to desc_read:E * * Note: desc_make_reusable:A and data_alloc:B can be different * CPUs. However, the data_alloc:B CPU (which performs the * full memory barrier) must have previously seen * desc_make_reusable:A. */ smp_rmb(); /* LMM(desc_read:D) */ /* * The data has been copied. Return the current descriptor state, * which may have changed since the load above. */ state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */ d_state = get_desc_state(id, state_val); out: if (desc_out) atomic_long_set(&desc_out->state_var, state_val); return d_state; } /* * Take a specified descriptor out of the finalized state by attempting * the transition from finalized to reusable. Either this context or some * other context will have been successful. */ static void desc_make_reusable(struct prb_desc_ring *desc_ring, unsigned long id) { unsigned long val_finalized = DESC_SV(id, desc_finalized); unsigned long val_reusable = DESC_SV(id, desc_reusable); struct prb_desc *desc = to_desc(desc_ring, id); atomic_long_t *state_var = &desc->state_var; atomic_long_cmpxchg_relaxed(state_var, val_finalized, val_reusable); /* LMM(desc_make_reusable:A) */ } /* * Given the text data ring, put the associated descriptor of each * data block from @lpos_begin until @lpos_end into the reusable state. * * If there is any problem making the associated descriptor reusable, either * the descriptor has not yet been finalized or another writer context has * already pushed the tail lpos past the problematic data block. Regardless, * on error the caller can re-load the tail lpos to determine the situation. */ static bool data_make_reusable(struct printk_ringbuffer *rb, unsigned long lpos_begin, unsigned long lpos_end, unsigned long *lpos_out) { struct prb_data_ring *data_ring = &rb->text_data_ring; struct prb_desc_ring *desc_ring = &rb->desc_ring; struct prb_data_block *blk; enum desc_state d_state; struct prb_desc desc; struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos; unsigned long id; /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */ while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) { blk = to_block(data_ring, lpos_begin); /* * Load the block ID from the data block. This is a data race * against a writer that may have newly reserved this data * area. 
If the loaded value matches a valid descriptor ID, * the blk_lpos of that descriptor will be checked to make * sure it points back to this data block. If the check fails, * the data area has been recycled by another writer. */ id = blk->id; /* LMM(data_make_reusable:A) */ d_state = desc_read(desc_ring, id, &desc, NULL, NULL); /* LMM(data_make_reusable:B) */ switch (d_state) { case desc_miss: case desc_reserved: case desc_committed: return false; case desc_finalized: /* * This data block is invalid if the descriptor * does not point back to it. */ if (blk_lpos->begin != lpos_begin) return false; desc_make_reusable(desc_ring, id); break; case desc_reusable: /* * This data block is invalid if the descriptor * does not point back to it. */ if (blk_lpos->begin != lpos_begin) return false; break; } /* Advance @lpos_begin to the next data block. */ lpos_begin = blk_lpos->next; } *lpos_out = lpos_begin; return true; } /* * Advance the data ring tail to at least @lpos. This function puts * descriptors into the reusable state if the tail is pushed beyond * their associated data block. */ static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos) { struct prb_data_ring *data_ring = &rb->text_data_ring; unsigned long tail_lpos_new; unsigned long tail_lpos; unsigned long next_lpos; /* If @lpos is from a data-less block, there is nothing to do. */ if (LPOS_DATALESS(lpos)) return true; /* * Any descriptor states that have transitioned to reusable due to the * data tail being pushed to this loaded value will be visible to this * CPU. This pairs with data_push_tail:D. * * Memory barrier involvement: * * If data_push_tail:A reads from data_push_tail:D, then this CPU can * see desc_make_reusable:A. * * Relies on: * * MB from desc_make_reusable:A to data_push_tail:D * matches * READFROM from data_push_tail:D to data_push_tail:A * thus * READFROM from desc_make_reusable:A to this CPU */ tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */ /* * Loop until the tail lpos is at or beyond @lpos. This condition * may already be satisfied, resulting in no full memory barrier * from data_push_tail:D being performed. However, since this CPU * sees the new tail lpos, any descriptor states that transitioned to * the reusable state must already be visible. */ while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) { /* * Make all descriptors reusable that are associated with * data blocks before @lpos. */ if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) { /* * 1. Guarantee the block ID loaded in * data_make_reusable() is performed before * reloading the tail lpos. The failed * data_make_reusable() may be due to a newly * recycled data area causing the tail lpos to * have been previously pushed. This pairs with * data_alloc:A and data_realloc:A. * * Memory barrier involvement: * * If data_make_reusable:A reads from data_alloc:B, * then data_push_tail:C reads from * data_push_tail:D. * * Relies on: * * MB from data_push_tail:D to data_alloc:B * matching * RMB from data_make_reusable:A to * data_push_tail:C * * Note: data_push_tail:D and data_alloc:B can be * different CPUs. However, the data_alloc:B * CPU (which performs the full memory * barrier) must have previously seen * data_push_tail:D. * * 2. Guarantee the descriptor state loaded in * data_make_reusable() is performed before * reloading the tail lpos. The failed * data_make_reusable() may be due to a newly * recycled descriptor causing the tail lpos to * have been previously pushed. 
This pairs with * desc_reserve:D. * * Memory barrier involvement: * * If data_make_reusable:B reads from * desc_reserve:F, then data_push_tail:C reads * from data_push_tail:D. * * Relies on: * * MB from data_push_tail:D to desc_reserve:F * matching * RMB from data_make_reusable:B to * data_push_tail:C * * Note: data_push_tail:D and desc_reserve:F can * be different CPUs. However, the * desc_reserve:F CPU (which performs the * full memory barrier) must have previously * seen data_push_tail:D. */ smp_rmb(); /* LMM(data_push_tail:B) */ tail_lpos_new = atomic_long_read(&data_ring->tail_lpos ); /* LMM(data_push_tail:C) */ if (tail_lpos_new == tail_lpos) return false; /* Another CPU pushed the tail. Try again. */ tail_lpos = tail_lpos_new; continue; } /* * Guarantee any descriptor states that have transitioned to * reusable are stored before pushing the tail lpos. A full * memory barrier is needed since other CPUs may have made * the descriptor states reusable. This pairs with * data_push_tail:A. */ if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos, next_lpos)) { /* LMM(data_push_tail:D) */ break; } } return true; } /* * Advance the desc ring tail. This function advances the tail by one * descriptor, thus invalidating the oldest descriptor. Before advancing * the tail, the tail descriptor is made reusable and all data blocks up to * and including the descriptor's data block are invalidated (i.e. the data * ring tail is pushed past the data block of the descriptor being made * reusable). */ static bool desc_push_tail(struct printk_ringbuffer *rb, unsigned long tail_id) { struct prb_desc_ring *desc_ring = &rb->desc_ring; enum desc_state d_state; struct prb_desc desc; d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL); switch (d_state) { case desc_miss: /* * If the ID is exactly 1 wrap behind the expected, it is * in the process of being reserved by another writer and * must be considered reserved. */ if (DESC_ID(atomic_long_read(&desc.state_var)) == DESC_ID_PREV_WRAP(desc_ring, tail_id)) { return false; } /* * The ID has changed. Another writer must have pushed the * tail and recycled the descriptor already. Success is * returned because the caller is only interested in the * specified tail being pushed, which it was. */ return true; case desc_reserved: case desc_committed: return false; case desc_finalized: desc_make_reusable(desc_ring, tail_id); break; case desc_reusable: break; } /* * Data blocks must be invalidated before their associated * descriptor can be made available for recycling. Invalidating * them later is not possible because there is no way to trust * data blocks once their associated descriptor is gone. */ if (!data_push_tail(rb, desc.text_blk_lpos.next)) return false; /* * Check the next descriptor after @tail_id before pushing the tail * to it because the tail must always be in a finalized or reusable * state. The implementation of prb_first_seq() relies on this. * * A successful read implies that the next descriptor is less than or * equal to @head_id so there is no risk of pushing the tail past the * head. */ d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc, NULL, NULL); /* LMM(desc_push_tail:A) */ if (d_state == desc_finalized || d_state == desc_reusable) { /* * Guarantee any descriptor states that have transitioned to * reusable are stored before pushing the tail ID. This allows * verifying the recycled descriptor state. A full memory * barrier is needed since other CPUs may have made the * descriptor states reusable. 
This pairs with desc_reserve:D. */ atomic_long_cmpxchg(&desc_ring->tail_id, tail_id, DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */ } else { /* * Guarantee the last state load from desc_read() is before * reloading @tail_id in order to see a new tail ID in the * case that the descriptor has been recycled. This pairs * with desc_reserve:D. * * Memory barrier involvement: * * If desc_push_tail:A reads from desc_reserve:F, then * desc_push_tail:D reads from desc_push_tail:B. * * Relies on: * * MB from desc_push_tail:B to desc_reserve:F * matching * RMB from desc_push_tail:A to desc_push_tail:D * * Note: desc_push_tail:B and desc_reserve:F can be different * CPUs. However, the desc_reserve:F CPU (which performs * the full memory barrier) must have previously seen * desc_push_tail:B. */ smp_rmb(); /* LMM(desc_push_tail:C) */ /* * Re-check the tail ID. The descriptor following @tail_id is * not in an allowed tail state. But if the tail has since * been moved by another CPU, then it does not matter. */ if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */ return false; } return true; } /* Reserve a new descriptor, invalidating the oldest if necessary. */ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out) { struct prb_desc_ring *desc_ring = &rb->desc_ring; unsigned long prev_state_val; unsigned long id_prev_wrap; struct prb_desc *desc; unsigned long head_id; unsigned long id; head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */ do { id = DESC_ID(head_id + 1); id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id); /* * Guarantee the head ID is read before reading the tail ID. * Since the tail ID is updated before the head ID, this * guarantees that @id_prev_wrap is never ahead of the tail * ID. This pairs with desc_reserve:D. * * Memory barrier involvement: * * If desc_reserve:A reads from desc_reserve:D, then * desc_reserve:C reads from desc_push_tail:B. * * Relies on: * * MB from desc_push_tail:B to desc_reserve:D * matching * RMB from desc_reserve:A to desc_reserve:C * * Note: desc_push_tail:B and desc_reserve:D can be different * CPUs. However, the desc_reserve:D CPU (which performs * the full memory barrier) must have previously seen * desc_push_tail:B. */ smp_rmb(); /* LMM(desc_reserve:B) */ if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id )) { /* LMM(desc_reserve:C) */ /* * Make space for the new descriptor by * advancing the tail. */ if (!desc_push_tail(rb, id_prev_wrap)) return false; } /* * 1. Guarantee the tail ID is read before validating the * recycled descriptor state. A read memory barrier is * sufficient for this. This pairs with desc_push_tail:B. * * Memory barrier involvement: * * If desc_reserve:C reads from desc_push_tail:B, then * desc_reserve:E reads from desc_make_reusable:A. * * Relies on: * * MB from desc_make_reusable:A to desc_push_tail:B * matching * RMB from desc_reserve:C to desc_reserve:E * * Note: desc_make_reusable:A and desc_push_tail:B can be * different CPUs. However, the desc_push_tail:B CPU * (which performs the full memory barrier) must have * previously seen desc_make_reusable:A. * * 2. Guarantee the tail ID is stored before storing the head * ID. This pairs with desc_reserve:B. * * 3. Guarantee any data ring tail changes are stored before * recycling the descriptor. Data ring tail changes can * happen via desc_push_tail()->data_push_tail(). A full * memory barrier is needed since another CPU may have * pushed the data ring tails. This pairs with * data_push_tail:B. * * 4. 
Guarantee a new tail ID is stored before recycling the * descriptor. A full memory barrier is needed since * another CPU may have pushed the tail ID. This pairs * with desc_push_tail:C and this also pairs with * prb_first_seq:C. * * 5. Guarantee the head ID is stored before trying to * finalize the previous descriptor. This pairs with * _prb_commit:B. */ } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id, id)); /* LMM(desc_reserve:D) */ desc = to_desc(desc_ring, id); /* * If the descriptor has been recycled, verify the old state val. * See "ABA Issues" about why this verification is performed. */ prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */ if (prev_state_val && get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) { WARN_ON_ONCE(1); return false; } /* * Assign the descriptor a new ID and set its state to reserved. * See "ABA Issues" about why cmpxchg() instead of set() is used. * * Guarantee the new descriptor ID and state is stored before making * any other changes. A write memory barrier is sufficient for this. * This pairs with desc_read:D. */ if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val, DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */ WARN_ON_ONCE(1); return false; } /* Now data in @desc can be modified: LMM(desc_reserve:G) */ *id_out = id; return true; } /* Determine the end of a data block. */ static unsigned long get_next_lpos(struct prb_data_ring *data_ring, unsigned long lpos, unsigned int size) { unsigned long begin_lpos; unsigned long next_lpos; begin_lpos = lpos; next_lpos = lpos + size; /* First check if the data block does not wrap. */ if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos)) return next_lpos; /* Wrapping data blocks store their data at the beginning. */ return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size); } /* * Allocate a new data block, invalidating the oldest data block(s) * if necessary. This function also associates the data block with * a specified descriptor. */ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size, struct prb_data_blk_lpos *blk_lpos, unsigned long id) { struct prb_data_ring *data_ring = &rb->text_data_ring; struct prb_data_block *blk; unsigned long begin_lpos; unsigned long next_lpos; if (size == 0) { /* * Data blocks are not created for empty lines. Instead, the * reader will recognize these special lpos values and handle * it appropriately. */ blk_lpos->begin = EMPTY_LINE_LPOS; blk_lpos->next = EMPTY_LINE_LPOS; return NULL; } size = to_blk_size(size); begin_lpos = atomic_long_read(&data_ring->head_lpos); do { next_lpos = get_next_lpos(data_ring, begin_lpos, size); if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) { /* Failed to allocate, specify a data-less block. */ blk_lpos->begin = FAILED_LPOS; blk_lpos->next = FAILED_LPOS; return NULL; } /* * 1. Guarantee any descriptor states that have transitioned * to reusable are stored before modifying the newly * allocated data area. A full memory barrier is needed * since other CPUs may have made the descriptor states * reusable. See data_push_tail:A about why the reusable * states are visible. This pairs with desc_read:D. * * 2. Guarantee any updated tail lpos is stored before * modifying the newly allocated data area. Another CPU may * be in data_make_reusable() and is reading a block ID * from this area. data_make_reusable() can handle reading * a garbage block ID value, but then it must be able to * load a new tail lpos. 
A full memory barrier is needed * since other CPUs may have updated the tail lpos. This * pairs with data_push_tail:B. */ } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos, next_lpos)); /* LMM(data_alloc:A) */ blk = to_block(data_ring, begin_lpos); blk->id = id; /* LMM(data_alloc:B) */ if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) { /* Wrapping data blocks store their data at the beginning. */ blk = to_block(data_ring, 0); /* * Store the ID on the wrapped block for consistency. * The printk_ringbuffer does not actually use it. */ blk->id = id; } blk_lpos->begin = begin_lpos; blk_lpos->next = next_lpos; return &blk->data[0]; } /* * Try to resize an existing data block associated with the descriptor * specified by @id. If the resized data block should become wrapped, it * copies the old data to the new data block. If @size yields a data block * with the same or less size, the data block is left as is. * * Fail if this is not the last allocated data block or if there is not * enough space or it is not possible make enough space. * * Return a pointer to the beginning of the entire data buffer or NULL on * failure. */ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size, struct prb_data_blk_lpos *blk_lpos, unsigned long id) { struct prb_data_ring *data_ring = &rb->text_data_ring; struct prb_data_block *blk; unsigned long head_lpos; unsigned long next_lpos; bool wrapped; /* Reallocation only works if @blk_lpos is the newest data block. */ head_lpos = atomic_long_read(&data_ring->head_lpos); if (head_lpos != blk_lpos->next) return NULL; /* Keep track if @blk_lpos was a wrapping data block. */ wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next)); size = to_blk_size(size); next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size); /* If the data block does not increase, there is nothing to do. */ if (head_lpos - next_lpos < DATA_SIZE(data_ring)) { if (wrapped) blk = to_block(data_ring, 0); else blk = to_block(data_ring, blk_lpos->begin); return &blk->data[0]; } if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) return NULL; /* The memory barrier involvement is the same as data_alloc:A. */ if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos, next_lpos)) { /* LMM(data_realloc:A) */ return NULL; } blk = to_block(data_ring, blk_lpos->begin); if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) { struct prb_data_block *old_blk = blk; /* Wrapping data blocks store their data at the beginning. */ blk = to_block(data_ring, 0); /* * Store the ID on the wrapped block for consistency. * The printk_ringbuffer does not actually use it. */ blk->id = id; if (!wrapped) { /* * Since the allocated space is now in the newly * created wrapping data block, copy the content * from the old data block. */ memcpy(&blk->data[0], &old_blk->data[0], (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id)); } } blk_lpos->next = next_lpos; return &blk->data[0]; } /* Return the number of bytes used by a data block. */ static unsigned int space_used(struct prb_data_ring *data_ring, struct prb_data_blk_lpos *blk_lpos) { /* Data-less blocks take no space. */ if (BLK_DATALESS(blk_lpos)) return 0; if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { /* Data block does not wrap. */ return (DATA_INDEX(data_ring, blk_lpos->next) - DATA_INDEX(data_ring, blk_lpos->begin)); } /* * For wrapping data blocks, the trailing (wasted) space is * also counted. 
*/ return (DATA_INDEX(data_ring, blk_lpos->next) + DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin)); } /* * Given @blk_lpos, return a pointer to the writer data from the data block * and calculate the size of the data part. A NULL pointer is returned if * @blk_lpos specifies values that could never be legal. * * This function (used by readers) performs strict validation on the lpos * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is * triggered if an internal error is detected. */ static const char *get_data(struct prb_data_ring *data_ring, struct prb_data_blk_lpos *blk_lpos, unsigned int *data_size) { struct prb_data_block *db; /* Data-less data block description. */ if (BLK_DATALESS(blk_lpos)) { /* * Records that are just empty lines are also valid, even * though they do not have a data block. For such records * explicitly return empty string data to signify success. */ if (blk_lpos->begin == EMPTY_LINE_LPOS && blk_lpos->next == EMPTY_LINE_LPOS) { *data_size = 0; return ""; } /* Data lost, invalid, or otherwise unavailable. */ return NULL; } /* Regular data block: @begin less than @next and in same wrap. */ if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && blk_lpos->begin < blk_lpos->next) { db = to_block(data_ring, blk_lpos->begin); *data_size = blk_lpos->next - blk_lpos->begin; /* Wrapping data block: @begin is one wrap behind @next. */ } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == DATA_WRAPS(data_ring, blk_lpos->next)) { db = to_block(data_ring, 0); *data_size = DATA_INDEX(data_ring, blk_lpos->next); /* Illegal block description. */ } else { WARN_ON_ONCE(1); return NULL; } /* A valid data block will always be aligned to the ID size. */ if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) || WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) { return NULL; } /* A valid data block will always have at least an ID. */ if (WARN_ON_ONCE(*data_size < sizeof(db->id))) return NULL; /* Subtract block ID space from size to reflect data size. */ *data_size -= sizeof(db->id); return &db->data[0]; } /* * Attempt to transition the newest descriptor from committed back to reserved * so that the record can be modified by a writer again. This is only possible * if the descriptor is not yet finalized and the provided @caller_id matches. */ static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring, u32 caller_id, unsigned long *id_out) { unsigned long prev_state_val; enum desc_state d_state; struct prb_desc desc; struct prb_desc *d; unsigned long id; u32 cid; id = atomic_long_read(&desc_ring->head_id); /* * To reduce unnecessarily reopening, first check if the descriptor * state and caller ID are correct. */ d_state = desc_read(desc_ring, id, &desc, NULL, &cid); if (d_state != desc_committed || cid != caller_id) return NULL; d = to_desc(desc_ring, id); prev_state_val = DESC_SV(id, desc_committed); /* * Guarantee the reserved state is stored before reading any * record data. A full memory barrier is needed because @state_var * modification is followed by reading. This pairs with _prb_commit:B. * * Memory barrier involvement: * * If desc_reopen_last:A reads from _prb_commit:B, then * prb_reserve_in_last:A reads from _prb_commit:A. 
* * Relies on: * * WMB from _prb_commit:A to _prb_commit:B * matching * MB If desc_reopen_last:A to prb_reserve_in_last:A */ if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */ return NULL; } *id_out = id; return d; } /** * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer * used by the newest record. * * @e: The entry structure to setup. * @rb: The ringbuffer to re-reserve and extend data in. * @r: The record structure to allocate buffers for. * @caller_id: The caller ID of the caller (reserving writer). * @max_size: Fail if the extended size would be greater than this. * * This is the public function available to writers to re-reserve and extend * data. * * The writer specifies the text size to extend (not the new total size) by * setting the @text_buf_size field of @r. To ensure proper initialization * of @r, prb_rec_init_wr() should be used. * * This function will fail if @caller_id does not match the caller ID of the * newest record. In that case the caller must reserve new data using * prb_reserve(). * * Context: Any context. Disables local interrupts on success. * Return: true if text data could be extended, otherwise false. * * On success: * * - @r->text_buf points to the beginning of the entire text buffer. * * - @r->text_buf_size is set to the new total size of the buffer. * * - @r->info is not touched so that @r->info->text_len could be used * to append the text. * * - prb_record_text_space() can be used on @e to query the new * actually used space. * * Important: All @r->info fields will already be set with the current values * for the record. I.e. @r->info->text_len will be less than * @text_buf_size. Writers can use @r->info->text_len to know * where concatenation begins and writers should update * @r->info->text_len after concatenating. */ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, struct printk_record *r, u32 caller_id, unsigned int max_size) { struct prb_desc_ring *desc_ring = &rb->desc_ring; struct printk_info *info; unsigned int data_size; struct prb_desc *d; unsigned long id; local_irq_save(e->irqflags); /* Transition the newest descriptor back to the reserved state. */ d = desc_reopen_last(desc_ring, caller_id, &id); if (!d) { local_irq_restore(e->irqflags); goto fail_reopen; } /* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */ info = to_info(desc_ring, id); /* * Set the @e fields here so that prb_commit() can be used if * anything fails from now on. */ e->rb = rb; e->id = id; /* * desc_reopen_last() checked the caller_id, but there was no * exclusive access at that point. The descriptor may have * changed since then. */ if (caller_id != info->caller_id) goto fail; if (BLK_DATALESS(&d->text_blk_lpos)) { if (WARN_ON_ONCE(info->text_len != 0)) { pr_warn_once("wrong text_len value (%hu, expecting 0)\n", info->text_len); info->text_len = 0; } if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) goto fail; if (r->text_buf_size > max_size) goto fail; r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id); } else { if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size)) goto fail; /* * Increase the buffer size to include the original size. If * the meta data (@text_len) is not sane, use the full data * block size. 
*/ if (WARN_ON_ONCE(info->text_len > data_size)) { pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n", info->text_len, data_size); info->text_len = data_size; } r->text_buf_size += info->text_len; if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) goto fail; if (r->text_buf_size > max_size) goto fail; r->text_buf = data_realloc(rb, r->text_buf_size, &d->text_blk_lpos, id); } if (r->text_buf_size && !r->text_buf) goto fail; r->info = info; e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); return true; fail: prb_commit(e); /* prb_commit() re-enabled interrupts. */ fail_reopen: /* Make it clear to the caller that the re-reserve failed. */ memset(r, 0, sizeof(*r)); return false; } /* * @last_finalized_seq value guarantees that all records up to and including * this sequence number are finalized and can be read. The only exception are * too old records which have already been overwritten. * * It is also guaranteed that @last_finalized_seq only increases. * * Be aware that finalized records following non-finalized records are not * reported because they are not yet available to the reader. For example, * a new record stored via printk() will not be available to a printer if * it follows a record that has not been finalized yet. However, once that * non-finalized record becomes finalized, @last_finalized_seq will be * appropriately updated and the full set of finalized records will be * available to the printer. And since each printk() caller will either * directly print or trigger deferred printing of all available unprinted * records, all printk() messages will get printed. */ static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb) { struct prb_desc_ring *desc_ring = &rb->desc_ring; unsigned long ulseq; /* * Guarantee the sequence number is loaded before loading the * associated record in order to guarantee that the record can be * seen by this CPU. This pairs with desc_update_last_finalized:A. */ ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq ); /* LMM(desc_last_finalized_seq:A) */ return __ulseq_to_u64seq(rb, ulseq); } static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, struct printk_record *r, unsigned int *line_count); /* * Check if there are records directly following @last_finalized_seq that are * finalized. If so, update @last_finalized_seq to the latest of these * records. It is not allowed to skip over records that are not yet finalized. */ static void desc_update_last_finalized(struct printk_ringbuffer *rb) { struct prb_desc_ring *desc_ring = &rb->desc_ring; u64 old_seq = desc_last_finalized_seq(rb); unsigned long oldval; unsigned long newval; u64 finalized_seq; u64 try_seq; try_again: finalized_seq = old_seq; try_seq = finalized_seq + 1; /* Try to find later finalized records. */ while (_prb_read_valid(rb, &try_seq, NULL, NULL)) { finalized_seq = try_seq; try_seq++; } /* No update needed if no later finalized record was found. */ if (finalized_seq == old_seq) return; oldval = __u64seq_to_ulseq(old_seq); newval = __u64seq_to_ulseq(finalized_seq); /* * Set the sequence number of a later finalized record that has been * seen. * * Guarantee the record data is visible to other CPUs before storing * its sequence number. This pairs with desc_last_finalized_seq:A. * * Memory barrier involvement: * * If desc_last_finalized_seq:A reads from * desc_update_last_finalized:A, then desc_read:A reads from * _prb_commit:B. 
* * Relies on: * * RELEASE from _prb_commit:B to desc_update_last_finalized:A * matching * ACQUIRE from desc_last_finalized_seq:A to desc_read:A * * Note: _prb_commit:B and desc_update_last_finalized:A can be * different CPUs. However, the desc_update_last_finalized:A * CPU (which performs the release) must have previously seen * _prb_commit:B. */ if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq, &oldval, newval)) { /* LMM(desc_update_last_finalized:A) */ old_seq = __ulseq_to_u64seq(rb, oldval); goto try_again; } } /* * Attempt to finalize a specified descriptor. If this fails, the descriptor * is either already final or it will finalize itself when the writer commits. */ static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id) { struct prb_desc_ring *desc_ring = &rb->desc_ring; unsigned long prev_state_val = DESC_SV(id, desc_committed); struct prb_desc *d = to_desc(desc_ring, id); if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val, DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */ desc_update_last_finalized(rb); } } /** * prb_reserve() - Reserve space in the ringbuffer. * * @e: The entry structure to setup. * @rb: The ringbuffer to reserve data in. * @r: The record structure to allocate buffers for. * * This is the public function available to writers to reserve data. * * The writer specifies the text size to reserve by setting the * @text_buf_size field of @r. To ensure proper initialization of @r, * prb_rec_init_wr() should be used. * * Context: Any context. Disables local interrupts on success. * Return: true if at least text data could be allocated, otherwise false. * * On success, the fields @info and @text_buf of @r will be set by this * function and should be filled in by the writer before committing. Also * on success, prb_record_text_space() can be used on @e to query the actual * space used for the text data block. * * Important: @info->text_len needs to be set correctly by the writer in * order for data to be readable and/or extended. Its value * is initialized to 0. */ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, struct printk_record *r) { struct prb_desc_ring *desc_ring = &rb->desc_ring; struct printk_info *info; struct prb_desc *d; unsigned long id; u64 seq; if (!data_check_size(&rb->text_data_ring, r->text_buf_size)) goto fail; /* * Descriptors in the reserved state act as blockers to all further * reservations once the desc_ring has fully wrapped. Disable * interrupts during the reserve/commit window in order to minimize * the likelihood of this happening. */ local_irq_save(e->irqflags); if (!desc_reserve(rb, &id)) { /* Descriptor reservation failures are tracked. */ atomic_long_inc(&rb->fail); local_irq_restore(e->irqflags); goto fail; } d = to_desc(desc_ring, id); info = to_info(desc_ring, id); /* * All @info fields (except @seq) are cleared and must be filled in * by the writer. Save @seq before clearing because it is used to * determine the new sequence number. */ seq = info->seq; memset(info, 0, sizeof(*info)); /* * Set the @e fields here so that prb_commit() can be used if * text data allocation fails. */ e->rb = rb; e->id = id; /* * Initialize the sequence number if it has "never been set". * Otherwise just increment it by a full wrap. * * @seq is considered "never been set" if it has a value of 0, * _except_ for @infos[0], which was specially setup by the ringbuffer * initializer and therefore is always considered as set. 
* * See the "Bootstrap" comment block in printk_ringbuffer.h for * details about how the initializer bootstraps the descriptors. */ if (seq == 0 && DESC_INDEX(desc_ring, id) != 0) info->seq = DESC_INDEX(desc_ring, id); else info->seq = seq + DESCS_COUNT(desc_ring); /* * New data is about to be reserved. Once that happens, previous * descriptors are no longer able to be extended. Finalize the * previous descriptor now so that it can be made available to * readers. (For seq==0 there is no previous descriptor.) */ if (info->seq > 0) desc_make_final(rb, DESC_ID(id - 1)); r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id); /* If text data allocation fails, a data-less record is committed. */ if (r->text_buf_size && !r->text_buf) { prb_commit(e); /* prb_commit() re-enabled interrupts. */ goto fail; } r->info = info; /* Record full text space used by record. */ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos); return true; fail: /* Make it clear to the caller that the reserve failed. */ memset(r, 0, sizeof(*r)); return false; } /* Commit the data (possibly finalizing it) and restore interrupts. */ static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val) { struct prb_desc_ring *desc_ring = &e->rb->desc_ring; struct prb_desc *d = to_desc(desc_ring, e->id); unsigned long prev_state_val = DESC_SV(e->id, desc_reserved); /* Now the writer has finished all writing: LMM(_prb_commit:A) */ /* * Set the descriptor as committed. See "ABA Issues" about why * cmpxchg() instead of set() is used. * * 1 Guarantee all record data is stored before the descriptor state * is stored as committed. A write memory barrier is sufficient * for this. This pairs with desc_read:B and desc_reopen_last:A. * * 2. Guarantee the descriptor state is stored as committed before * re-checking the head ID in order to possibly finalize this * descriptor. This pairs with desc_reserve:D. * * Memory barrier involvement: * * If prb_commit:A reads from desc_reserve:D, then * desc_make_final:A reads from _prb_commit:B. * * Relies on: * * MB _prb_commit:B to prb_commit:A * matching * MB desc_reserve:D to desc_make_final:A */ if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val, DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */ WARN_ON_ONCE(1); } /* Restore interrupts, the reserve/commit window is finished. */ local_irq_restore(e->irqflags); } /** * prb_commit() - Commit (previously reserved) data to the ringbuffer. * * @e: The entry containing the reserved data information. * * This is the public function available to writers to commit data. * * Note that the data is not yet available to readers until it is finalized. * Finalizing happens automatically when space for the next record is * reserved. * * See prb_final_commit() for a version of this function that finalizes * immediately. * * Context: Any context. Enables local interrupts. */ void prb_commit(struct prb_reserved_entry *e) { struct prb_desc_ring *desc_ring = &e->rb->desc_ring; unsigned long head_id; _prb_commit(e, desc_committed); /* * If this descriptor is no longer the head (i.e. a new record has * been allocated), extending the data for this record is no longer * allowed and therefore it must be finalized. */ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */ if (head_id != e->id) desc_make_final(e->rb, e->id); } /** * prb_final_commit() - Commit and finalize (previously reserved) data to * the ringbuffer. * * @e: The entry containing the reserved data information. 
* * This is the public function available to writers to commit+finalize data. * * By finalizing, the data is made immediately available to readers. * * This function should only be used if there are no intentions of extending * this data using prb_reserve_in_last(). * * Context: Any context. Enables local interrupts. */ void prb_final_commit(struct prb_reserved_entry *e) { _prb_commit(e, desc_finalized); desc_update_last_finalized(e->rb); } /* * Count the number of lines in provided text. All text has at least 1 line * (even if @text_size is 0). Each '\n' processed is counted as an additional * line. */ static unsigned int count_lines(const char *text, unsigned int text_size) { unsigned int next_size = text_size; unsigned int line_count = 1; const char *next = text; while (next_size) { next = memchr(next, '\n', next_size); if (!next) break; line_count++; next++; next_size = text_size - (next - text); } return line_count; } /* * Given @blk_lpos, copy an expected @len of data into the provided buffer. * If @line_count is provided, count the number of lines in the data. * * This function (used by readers) performs strict validation on the data * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is * triggered if an internal error is detected. */ static bool copy_data(struct prb_data_ring *data_ring, struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf, unsigned int buf_size, unsigned int *line_count) { unsigned int data_size; const char *data; /* Caller might not want any data. */ if ((!buf || !buf_size) && !line_count) return true; data = get_data(data_ring, blk_lpos, &data_size); if (!data) return false; /* * Actual cannot be less than expected. It can be more than expected * because of the trailing alignment padding. * * Note that invalid @len values can occur because the caller loads * the value during an allowed data race. */ if (data_size < (unsigned int)len) return false; /* Caller interested in the line count? */ if (line_count) *line_count = count_lines(data, len); /* Caller interested in the data content? */ if (!buf || !buf_size) return true; data_size = min_t(unsigned int, buf_size, len); memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */ return true; } /* * This is an extended version of desc_read(). It gets a copy of a specified * descriptor. However, it also verifies that the record is finalized and has * the sequence number @seq. On success, 0 is returned. * * Error return values: * -EINVAL: A finalized record with sequence number @seq does not exist. * -ENOENT: A finalized record with sequence number @seq exists, but its data * is not available. This is a valid record, so readers should * continue with the next record. */ static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring, unsigned long id, u64 seq, struct prb_desc *desc_out) { struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos; enum desc_state d_state; u64 s; d_state = desc_read(desc_ring, id, desc_out, &s, NULL); /* * An unexpected @id (desc_miss) or @seq mismatch means the record * does not exist. A descriptor in the reserved or committed state * means the record does not yet exist for the reader. */ if (d_state == desc_miss || d_state == desc_reserved || d_state == desc_committed || s != seq) { return -EINVAL; } /* * A descriptor in the reusable state may no longer have its data * available; report it as existing but with lost data. Or the record * may actually be a record with lost data. 
*/ if (d_state == desc_reusable || (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) { return -ENOENT; } return 0; } /* * Copy the ringbuffer data from the record with @seq to the provided * @r buffer. On success, 0 is returned. * * See desc_read_finalized_seq() for error return values. */ static int prb_read(struct printk_ringbuffer *rb, u64 seq, struct printk_record *r, unsigned int *line_count) { struct prb_desc_ring *desc_ring = &rb->desc_ring; struct printk_info *info = to_info(desc_ring, seq); struct prb_desc *rdesc = to_desc(desc_ring, seq); atomic_long_t *state_var = &rdesc->state_var; struct prb_desc desc; unsigned long id; int err; /* Extract the ID, used to specify the descriptor to read. */ id = DESC_ID(atomic_long_read(state_var)); /* Get a local copy of the correct descriptor (if available). */ err = desc_read_finalized_seq(desc_ring, id, seq, &desc); /* * If @r is NULL, the caller is only interested in the availability * of the record. */ if (err || !r) return err; /* If requested, copy meta data. */ if (r->info) memcpy(r->info, info, sizeof(*(r->info))); /* Copy text data. If it fails, this is a data-less record. */ if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len, r->text_buf, r->text_buf_size, line_count)) { return -ENOENT; } /* Ensure the record is still finalized and has the same @seq. */ return desc_read_finalized_seq(desc_ring, id, seq, &desc); } /* Get the sequence number of the tail descriptor. */ u64 prb_first_seq(struct printk_ringbuffer *rb) { struct prb_desc_ring *desc_ring = &rb->desc_ring; enum desc_state d_state; struct prb_desc desc; unsigned long id; u64 seq; for (;;) { id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */ d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */ /* * This loop will not be infinite because the tail is * _always_ in the finalized or reusable state. */ if (d_state == desc_finalized || d_state == desc_reusable) break; /* * Guarantee the last state load from desc_read() is before * reloading @tail_id in order to see a new tail in the case * that the descriptor has been recycled. This pairs with * desc_reserve:D. * * Memory barrier involvement: * * If prb_first_seq:B reads from desc_reserve:F, then * prb_first_seq:A reads from desc_push_tail:B. * * Relies on: * * MB from desc_push_tail:B to desc_reserve:F * matching * RMB prb_first_seq:B to prb_first_seq:A */ smp_rmb(); /* LMM(prb_first_seq:C) */ } return seq; } /** * prb_next_reserve_seq() - Get the sequence number after the most recently * reserved record. * * @rb: The ringbuffer to get the sequence number from. * * This is the public function available to readers to see what sequence * number will be assigned to the next reserved record. * * Note that depending on the situation, this value can be equal to or * higher than the sequence number returned by prb_next_seq(). * * Context: Any context. * Return: The sequence number that will be assigned to the next record * reserved. */ u64 prb_next_reserve_seq(struct printk_ringbuffer *rb) { struct prb_desc_ring *desc_ring = &rb->desc_ring; unsigned long last_finalized_id; atomic_long_t *state_var; u64 last_finalized_seq; unsigned long head_id; struct prb_desc desc; unsigned long diff; struct prb_desc *d; int err; /* * It may not be possible to read a sequence number for @head_id. * So the ID of @last_finailzed_seq is used to calculate what the * sequence number of @head_id will be. 
*/ try_again: last_finalized_seq = desc_last_finalized_seq(rb); /* * @head_id is loaded after @last_finalized_seq to ensure that * it points to the record with @last_finalized_seq or newer. * * Memory barrier involvement: * * If desc_last_finalized_seq:A reads from * desc_update_last_finalized:A, then * prb_next_reserve_seq:A reads from desc_reserve:D. * * Relies on: * * RELEASE from desc_reserve:D to desc_update_last_finalized:A * matching * ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A * * Note: desc_reserve:D and desc_update_last_finalized:A can be * different CPUs. However, the desc_update_last_finalized:A CPU * (which performs the release) must have previously seen * desc_read:C, which implies desc_reserve:D can be seen. */ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */ d = to_desc(desc_ring, last_finalized_seq); state_var = &d->state_var; /* Extract the ID, used to specify the descriptor to read. */ last_finalized_id = DESC_ID(atomic_long_read(state_var)); /* Ensure @last_finalized_id is correct. */ err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc); if (err == -EINVAL) { if (last_finalized_seq == 0) { /* * No record has been finalized or even reserved yet. * * The @head_id is initialized such that the first * increment will yield the first record (seq=0). * Handle it separately to avoid a negative @diff * below. */ if (head_id == DESC0_ID(desc_ring->count_bits)) return 0; /* * One or more descriptors are already reserved. Use * the descriptor ID of the first one (@seq=0) for * the @diff below. */ last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1; } else { /* Record must have been overwritten. Try again. */ goto try_again; } } /* Diff of known descriptor IDs to compute related sequence numbers. */ diff = head_id - last_finalized_id; /* * @head_id points to the most recently reserved record, but this * function returns the sequence number that will be assigned to the * next (not yet reserved) record. Thus +1 is needed. */ return (last_finalized_seq + diff + 1); } /* * Non-blocking read of a record. * * On success @seq is updated to the record that was read and (if provided) * @r and @line_count will contain the read/calculated data. * * On failure @seq is updated to a record that is not yet available to the * reader, but it will be the next record available to the reader. * * Note: When the current CPU is in panic, this function will skip over any * non-existent/non-finalized records in order to allow the panic CPU * to print any and all records that have been finalized. */ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, struct printk_record *r, unsigned int *line_count) { u64 tail_seq; int err; while ((err = prb_read(rb, *seq, r, line_count))) { tail_seq = prb_first_seq(rb); if (*seq < tail_seq) { /* * Behind the tail. Catch up and try again. This * can happen for -ENOENT and -EINVAL cases. */ *seq = tail_seq; } else if (err == -ENOENT) { /* Record exists, but the data was lost. Skip. */ (*seq)++; } else { /* * Non-existent/non-finalized record. Must stop. * * For panic situations it cannot be expected that * non-finalized records will become finalized. But * there may be other finalized records beyond that * need to be printed for a panic situation. If this * is the panic CPU, skip this * non-existent/non-finalized record unless non-panic * CPUs are still running and their debugging is * explicitly enabled. 
* * Note that new messages printed on panic CPU are * finalized when we are here. The only exception * might be the last message without trailing newline. * But it would have the sequence number returned * by "prb_next_reserve_seq() - 1". */ if (this_cpu_in_panic() && (!debug_non_panic_cpus || legacy_allow_panic_sync) && ((*seq + 1) < prb_next_reserve_seq(rb))) { (*seq)++; } else { return false; } } } return true; } /** * prb_read_valid() - Non-blocking read of a requested record or (if gone) * the next available record. * * @rb: The ringbuffer to read from. * @seq: The sequence number of the record to read. * @r: A record data buffer to store the read record to. * * This is the public function available to readers to read a record. * * The reader provides the @info and @text_buf buffers of @r to be * filled in. Any of the buffer pointers can be set to NULL if the reader * is not interested in that data. To ensure proper initialization of @r, * prb_rec_init_rd() should be used. * * Context: Any context. * Return: true if a record was read, otherwise false. * * On success, the reader must check r->info.seq to see which record was * actually read. This allows the reader to detect dropped records. * * Failure means @seq refers to a record not yet available to the reader. */ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, struct printk_record *r) { return _prb_read_valid(rb, &seq, r, NULL); } /** * prb_read_valid_info() - Non-blocking read of meta data for a requested * record or (if gone) the next available record. * * @rb: The ringbuffer to read from. * @seq: The sequence number of the record to read. * @info: A buffer to store the read record meta data to. * @line_count: A buffer to store the number of lines in the record text. * * This is the public function available to readers to read only the * meta data of a record. * * The reader provides the @info, @line_count buffers to be filled in. * Either of the buffer pointers can be set to NULL if the reader is not * interested in that data. * * Context: Any context. * Return: true if a record's meta data was read, otherwise false. * * On success, the reader must check info->seq to see which record meta data * was actually read. This allows the reader to detect dropped records. * * Failure means @seq refers to a record not yet available to the reader. */ bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq, struct printk_info *info, unsigned int *line_count) { struct printk_record r; prb_rec_init_rd(&r, info, NULL, 0); return _prb_read_valid(rb, &seq, &r, line_count); } /** * prb_first_valid_seq() - Get the sequence number of the oldest available * record. * * @rb: The ringbuffer to get the sequence number from. * * This is the public function available to readers to see what the * first/oldest valid sequence number is. * * This provides readers a starting point to begin iterating the ringbuffer. * * Context: Any context. * Return: The sequence number of the first/oldest record or, if the * ringbuffer is empty, 0 is returned. */ u64 prb_first_valid_seq(struct printk_ringbuffer *rb) { u64 seq = 0; if (!_prb_read_valid(rb, &seq, NULL, NULL)) return 0; return seq; } /** * prb_next_seq() - Get the sequence number after the last available record. * * @rb: The ringbuffer to get the sequence number from. * * This is the public function available to readers to see what the next * newest sequence number available to readers will be. 
* * This provides readers a sequence number to jump to if all currently * available records should be skipped. It is guaranteed that all records * previous to the returned value have been finalized and are (or were) * available to the reader. * * Context: Any context. * Return: The sequence number of the next newest (not yet available) record * for readers. */ u64 prb_next_seq(struct printk_ringbuffer *rb) { u64 seq; seq = desc_last_finalized_seq(rb); /* * Begin searching after the last finalized record. * * On 0, the search must begin at 0 because of hack#2 * of the bootstrapping phase it is not known if a * record at index 0 exists. */ if (seq != 0) seq++; /* * The information about the last finalized @seq might be inaccurate. * Search forward to find the current one. */ while (_prb_read_valid(rb, &seq, NULL, NULL)) seq++; return seq; } /** * prb_init() - Initialize a ringbuffer to use provided external buffers. * * @rb: The ringbuffer to initialize. * @text_buf: The data buffer for text data. * @textbits: The size of @text_buf as a power-of-2 value. * @descs: The descriptor buffer for ringbuffer records. * @descbits: The count of @descs items as a power-of-2 value. * @infos: The printk_info buffer for ringbuffer records. * * This is the public function available to writers to setup a ringbuffer * during runtime using provided buffers. * * This must match the initialization of DEFINE_PRINTKRB(). * * Context: Any context. */ void prb_init(struct printk_ringbuffer *rb, char *text_buf, unsigned int textbits, struct prb_desc *descs, unsigned int descbits, struct printk_info *infos) { memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0])); memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0])); rb->desc_ring.count_bits = descbits; rb->desc_ring.descs = descs; rb->desc_ring.infos = infos; atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); atomic_long_set(&rb->desc_ring.last_finalized_seq, 0); rb->text_data_ring.size_bits = textbits; rb->text_data_ring.data = text_buf; atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits)); atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits)); atomic_long_set(&rb->fail, 0); atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits)); descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS; descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS; infos[0].seq = -(u64)_DESCS_COUNT(descbits); infos[_DESCS_COUNT(descbits) - 1].seq = 0; } /** * prb_record_text_space() - Query the full actual used ringbuffer space for * the text data of a reserved entry. * * @e: The successfully reserved entry to query. * * This is the public function available to writers to see how much actual * space is used in the ringbuffer to store the text data of the specified * entry. * * This function is only valid if @e has been successfully reserved using * prb_reserve(). * * Context: Any context. * Return: The size in bytes used by the text data of the associated record. */ unsigned int prb_record_text_space(struct prb_reserved_entry *e) { return e->text_space; } |
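/*
 * A minimal writer-side sketch based on the prb_reserve() and
 * prb_commit()/prb_final_commit() kerneldoc above. It is illustrative
 * only and not part of this file; @test_rb stands for a hypothetical
 * ringbuffer defined elsewhere (for example via prb_init()).
 *
 *	const char *textstr = "hello, ringbuffer";
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// Request enough text space for the string and its terminator.
 *	prb_rec_init_wr(&r, strlen(textstr) + 1);
 *
 *	if (prb_reserve(&e, &test_rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 *
 *		// @text_len must be set by the writer (see prb_reserve()).
 *		r.info->text_len = strlen(textstr);
 *
 *		// Finalize immediately so readers can see the record now;
 *		// plain prb_commit() would leave it extendable instead.
 *		prb_final_commit(&e);
 *	}
 */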
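/*
 * A minimal reader-side sketch, assuming the same hypothetical @test_rb
 * as above. It follows the prb_read_valid() contract: on success the
 * record actually read is identified by info.seq, which may be newer
 * than the requested @seq if older records were overwritten.
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[128];
 *	u64 seq = 0;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
 *
 *	while (prb_read_valid(&test_rb, seq, &r)) {
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *
 *		// copy_data() copies at most @text_len bytes and does not
 *		// add a terminator, so terminate before printing.
 *		text_buf[min_t(size_t, info.text_len,
 *			       sizeof(text_buf) - 1)] = '\0';
 *
 *		pr_info("%llu: %s\n", info.seq, &text_buf[0]);
 *
 *		// Continue with the record after the one actually read.
 *		seq = info.seq + 1;
 *	}
 */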
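/*
 * A minimal setup sketch for prb_init(), with hypothetical names and
 * sizes (4 KiB of text data, 32 descriptors). Sizes are given as
 * power-of-2 exponents and the descriptor count determines how many
 * printk_info entries are needed, matching the prb_init() kerneldoc
 * above. DEFINE_PRINTKRB() in printk_ringbuffer.h provides an
 * equivalent compile-time setup.
 *
 *	#define MY_TEXT_BITS 12		// 1 << 12 = 4096 bytes of text data
 *	#define MY_DESC_BITS 5		// 1 << 5  = 32 descriptors
 *
 *	static char my_text[1 << MY_TEXT_BITS];
 *	static struct prb_desc my_descs[1 << MY_DESC_BITS];
 *	static struct printk_info my_infos[1 << MY_DESC_BITS];
 *	static struct printk_ringbuffer my_rb;
 *
 *	// Called once, before any writer or reader uses @my_rb.
 *	prb_init(&my_rb, &my_text[0], MY_TEXT_BITS,
 *		 &my_descs[0], MY_DESC_BITS, &my_infos[0]);
 */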
// SPDX-License-Identifier: GPL-2.0-or-later /* * USB HID support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2007-2008 Oliver Neukum * Copyright (c) 2006-2010 Jiri Kosina */ /* */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/property.h> #include <linux/spinlock.h> #include <linux/unaligned.h> #include <asm/byteorder.h> #include <linux/input.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/hiddev.h> #include <linux/hid-debug.h> #include <linux/hidraw.h> #include "usbhid.h" #include "hid-pidff.h" /* * Version Information */ #define DRIVER_DESC "USB HID core driver" /* * Module parameters. */ static unsigned int hid_mousepoll_interval; module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644); MODULE_PARM_DESC(mousepoll, "Polling interval of mice"); static unsigned int hid_jspoll_interval; module_param_named(jspoll, hid_jspoll_interval, uint, 0644); MODULE_PARM_DESC(jspoll, "Polling interval of joysticks"); static unsigned int hid_kbpoll_interval; module_param_named(kbpoll, hid_kbpoll_interval, uint, 0644); MODULE_PARM_DESC(kbpoll, "Polling interval of keyboards"); static unsigned int ignoreled; module_param_named(ignoreled, ignoreled, uint, 0644); MODULE_PARM_DESC(ignoreled, "Autosuspend with active leds"); /* Quirks specified at module load time */ static char *quirks_param[MAX_USBHID_BOOT_QUIRKS]; module_param_array_named(quirks, quirks_param, charp, NULL, 0444); MODULE_PARM_DESC(quirks, "Add/modify USB HID quirks by specifying " " quirks=vendorID:productID:quirks" " where vendorID, productID, and quirks are all in" " 0x-prefixed hex"); /* * Input submission and I/O error handler. 
*/ static void hid_io_error(struct hid_device *hid); static int hid_submit_out(struct hid_device *hid); static int hid_submit_ctrl(struct hid_device *hid); static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid); /* Start up the input URB */ static int hid_start_in(struct hid_device *hid) { unsigned long flags; int rc = 0; struct usbhid_device *usbhid = hid->driver_data; spin_lock_irqsave(&usbhid->lock, flags); if (test_bit(HID_IN_POLLING, &usbhid->iofl) && !test_bit(HID_DISCONNECTED, &usbhid->iofl) && !test_bit(HID_SUSPENDED, &usbhid->iofl) && !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC); if (rc != 0) { clear_bit(HID_IN_RUNNING, &usbhid->iofl); if (rc == -ENOSPC) set_bit(HID_NO_BANDWIDTH, &usbhid->iofl); } else { clear_bit(HID_NO_BANDWIDTH, &usbhid->iofl); } } spin_unlock_irqrestore(&usbhid->lock, flags); return rc; } /* I/O retry timer routine */ static void hid_retry_timeout(struct timer_list *t) { struct usbhid_device *usbhid = timer_container_of(usbhid, t, io_retry); struct hid_device *hid = usbhid->hid; dev_dbg(&usbhid->intf->dev, "retrying intr urb\n"); if (hid_start_in(hid)) hid_io_error(hid); } /* Workqueue routine to reset the device or clear a halt */ static void hid_reset(struct work_struct *work) { struct usbhid_device *usbhid = container_of(work, struct usbhid_device, reset_work); struct hid_device *hid = usbhid->hid; int rc; if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) { dev_dbg(&usbhid->intf->dev, "clear halt\n"); rc = usb_clear_halt(hid_to_usb_dev(hid), usbhid->urbin->pipe); clear_bit(HID_CLEAR_HALT, &usbhid->iofl); if (rc == 0) { hid_start_in(hid); } else { dev_dbg(&usbhid->intf->dev, "clear-halt failed: %d\n", rc); set_bit(HID_RESET_PENDING, &usbhid->iofl); } } if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) { dev_dbg(&usbhid->intf->dev, "resetting device\n"); usb_queue_reset_device(usbhid->intf); } } /* Main I/O error handler */ static void hid_io_error(struct hid_device *hid) { unsigned long flags; struct usbhid_device *usbhid = hid->driver_data; spin_lock_irqsave(&usbhid->lock, flags); /* Stop when disconnected */ if (test_bit(HID_DISCONNECTED, &usbhid->iofl)) goto done; /* If it has been a while since the last error, we'll assume * this a brand new error and reset the retry timeout. */ if (time_after(jiffies, usbhid->stop_retry + HZ/2)) usbhid->retry_delay = 0; /* When an error occurs, retry at increasing intervals */ if (usbhid->retry_delay == 0) { usbhid->retry_delay = 13; /* Then 26, 52, 104, 104, ... 
*/ usbhid->stop_retry = jiffies + msecs_to_jiffies(1000); } else if (usbhid->retry_delay < 100) usbhid->retry_delay *= 2; if (time_after(jiffies, usbhid->stop_retry)) { /* Retries failed, so do a port reset unless we lack bandwidth*/ if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl) && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) { schedule_work(&usbhid->reset_work); goto done; } } mod_timer(&usbhid->io_retry, jiffies + msecs_to_jiffies(usbhid->retry_delay)); done: spin_unlock_irqrestore(&usbhid->lock, flags); } static void usbhid_mark_busy(struct usbhid_device *usbhid) { struct usb_interface *intf = usbhid->intf; usb_mark_last_busy(interface_to_usbdev(intf)); } static int usbhid_restart_out_queue(struct usbhid_device *usbhid) { struct hid_device *hid = usb_get_intfdata(usbhid->intf); int kicked; int r; if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) || test_bit(HID_SUSPENDED, &usbhid->iofl)) return 0; if ((kicked = (usbhid->outhead != usbhid->outtail))) { hid_dbg(hid, "Kicking head %d tail %d", usbhid->outhead, usbhid->outtail); /* Try to wake up from autosuspend... */ r = usb_autopm_get_interface_async(usbhid->intf); if (r < 0) return r; /* * If still suspended, don't submit. Submission will * occur if/when resume drains the queue. */ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) { usb_autopm_put_interface_no_suspend(usbhid->intf); return r; } /* Asynchronously flush queue. */ set_bit(HID_OUT_RUNNING, &usbhid->iofl); if (hid_submit_out(hid)) { clear_bit(HID_OUT_RUNNING, &usbhid->iofl); usb_autopm_put_interface_async(usbhid->intf); } wake_up(&usbhid->wait); } return kicked; } static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid) { struct hid_device *hid = usb_get_intfdata(usbhid->intf); int kicked; int r; WARN_ON(hid == NULL); if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) || test_bit(HID_SUSPENDED, &usbhid->iofl)) return 0; if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) { hid_dbg(hid, "Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail); /* Try to wake up from autosuspend... */ r = usb_autopm_get_interface_async(usbhid->intf); if (r < 0) return r; /* * If still suspended, don't submit. Submission will * occur if/when resume drains the queue. */ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) { usb_autopm_put_interface_no_suspend(usbhid->intf); return r; } /* Asynchronously flush queue. */ set_bit(HID_CTRL_RUNNING, &usbhid->iofl); if (hid_submit_ctrl(hid)) { clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); usb_autopm_put_interface_async(usbhid->intf); } wake_up(&usbhid->wait); } return kicked; } /* * Input interrupt completion handler. 
*/ static void hid_irq_in(struct urb *urb) { struct hid_device *hid = urb->context; struct usbhid_device *usbhid = hid->driver_data; int status; switch (urb->status) { case 0: /* success */ usbhid->retry_delay = 0; if (!test_bit(HID_OPENED, &usbhid->iofl)) break; usbhid_mark_busy(usbhid); if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { hid_input_report(urb->context, HID_INPUT_REPORT, urb->transfer_buffer, urb->actual_length, 1); /* * autosuspend refused while keys are pressed * because most keyboards don't wake up when * a key is released */ if (hid_check_keys_pressed(hid)) set_bit(HID_KEYS_PRESSED, &usbhid->iofl); else clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); } break; case -EPIPE: /* stall */ usbhid_mark_busy(usbhid); clear_bit(HID_IN_RUNNING, &usbhid->iofl); set_bit(HID_CLEAR_HALT, &usbhid->iofl); schedule_work(&usbhid->reset_work); return; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: /* unplug */ clear_bit(HID_IN_RUNNING, &usbhid->iofl); return; case -EILSEQ: /* protocol error or unplug */ case -EPROTO: /* protocol error or unplug */ case -ETIME: /* protocol error or unplug */ case -ETIMEDOUT: /* Should never happen, but... */ usbhid_mark_busy(usbhid); clear_bit(HID_IN_RUNNING, &usbhid->iofl); hid_io_error(hid); return; default: /* error */ hid_warn(urb->dev, "input irq status %d received\n", urb->status); } status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { clear_bit(HID_IN_RUNNING, &usbhid->iofl); if (status != -EPERM) { hid_err(hid, "can't resubmit intr, %s-%s/input%d, status %d\n", hid_to_usb_dev(hid)->bus->bus_name, hid_to_usb_dev(hid)->devpath, usbhid->ifnum, status); hid_io_error(hid); } } } static int hid_submit_out(struct hid_device *hid) { struct hid_report *report; char *raw_report; struct usbhid_device *usbhid = hid->driver_data; int r; report = usbhid->out[usbhid->outtail].report; raw_report = usbhid->out[usbhid->outtail].raw_report; usbhid->urbout->transfer_buffer_length = hid_report_len(report); usbhid->urbout->dev = hid_to_usb_dev(hid); if (raw_report) { memcpy(usbhid->outbuf, raw_report, usbhid->urbout->transfer_buffer_length); kfree(raw_report); usbhid->out[usbhid->outtail].raw_report = NULL; } dbg_hid("submitting out urb\n"); r = usb_submit_urb(usbhid->urbout, GFP_ATOMIC); if (r < 0) { hid_err(hid, "usb_submit_urb(out) failed: %d\n", r); return r; } usbhid->last_out = jiffies; return 0; } static int hid_submit_ctrl(struct hid_device *hid) { struct hid_report *report; unsigned char dir; char *raw_report; int len, r; struct usbhid_device *usbhid = hid->driver_data; report = usbhid->ctrl[usbhid->ctrltail].report; raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report; dir = usbhid->ctrl[usbhid->ctrltail].dir; len = hid_report_len(report); if (dir == USB_DIR_OUT) { usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0); if (raw_report) { memcpy(usbhid->ctrlbuf, raw_report, len); kfree(raw_report); usbhid->ctrl[usbhid->ctrltail].raw_report = NULL; } } else { int maxpacket; usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0); maxpacket = usb_maxpacket(hid_to_usb_dev(hid), usbhid->urbctrl->pipe); len += (len == 0); /* Don't allow 0-length reports */ len = round_up(len, maxpacket); if (len > usbhid->bufsize) len = usbhid->bufsize; } usbhid->urbctrl->transfer_buffer_length = len; usbhid->urbctrl->dev = hid_to_usb_dev(hid); usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir; usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? 
HID_REQ_SET_REPORT : HID_REQ_GET_REPORT; usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) | report->id); usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum); usbhid->cr->wLength = cpu_to_le16(len); dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n", usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report", usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength); r = usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC); if (r < 0) { hid_err(hid, "usb_submit_urb(ctrl) failed: %d\n", r); return r; } usbhid->last_ctrl = jiffies; return 0; } /* * Output interrupt completion handler. */ static void hid_irq_out(struct urb *urb) { struct hid_device *hid = urb->context; struct usbhid_device *usbhid = hid->driver_data; unsigned long flags; int unplug = 0; switch (urb->status) { case 0: /* success */ break; case -ESHUTDOWN: /* unplug */ unplug = 1; break; case -EILSEQ: /* protocol error or unplug */ case -EPROTO: /* protocol error or unplug */ case -ECONNRESET: /* unlink */ case -ENOENT: break; default: /* error */ hid_warn(urb->dev, "output irq status %d received\n", urb->status); } spin_lock_irqsave(&usbhid->lock, flags); if (unplug) { usbhid->outtail = usbhid->outhead; } else { usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1); if (usbhid->outhead != usbhid->outtail && hid_submit_out(hid) == 0) { /* Successfully submitted next urb in queue */ spin_unlock_irqrestore(&usbhid->lock, flags); return; } } clear_bit(HID_OUT_RUNNING, &usbhid->iofl); spin_unlock_irqrestore(&usbhid->lock, flags); usb_autopm_put_interface_async(usbhid->intf); wake_up(&usbhid->wait); } /* * Control pipe completion handler. */ static void hid_ctrl(struct urb *urb) { struct hid_device *hid = urb->context; struct usbhid_device *usbhid = hid->driver_data; unsigned long flags; int unplug = 0, status = urb->status; switch (status) { case 0: /* success */ if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN) hid_input_report(urb->context, usbhid->ctrl[usbhid->ctrltail].report->type, urb->transfer_buffer, urb->actual_length, 0); break; case -ESHUTDOWN: /* unplug */ unplug = 1; break; case -EILSEQ: /* protocol error or unplug */ case -EPROTO: /* protocol error or unplug */ case -ECONNRESET: /* unlink */ case -ENOENT: case -EPIPE: /* report not available */ break; default: /* error */ hid_warn(urb->dev, "ctrl urb status %d received\n", status); } spin_lock_irqsave(&usbhid->lock, flags); if (unplug) { usbhid->ctrltail = usbhid->ctrlhead; } else if (usbhid->ctrlhead != usbhid->ctrltail) { usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1); if (usbhid->ctrlhead != usbhid->ctrltail && hid_submit_ctrl(hid) == 0) { /* Successfully submitted next urb in queue */ spin_unlock_irqrestore(&usbhid->lock, flags); return; } } clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); spin_unlock_irqrestore(&usbhid->lock, flags); usb_autopm_put_interface_async(usbhid->intf); wake_up(&usbhid->wait); } static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) { int head; struct usbhid_device *usbhid = hid->driver_data; if (((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN) || test_bit(HID_DISCONNECTED, &usbhid->iofl)) return; if (usbhid->urbout && dir == USB_DIR_OUT && report->type == HID_OUTPUT_REPORT) { if ((head = (usbhid->outhead + 1) & (HID_OUTPUT_FIFO_SIZE - 1)) == usbhid->outtail) { hid_warn(hid, "output queue full\n"); return; } usbhid->out[usbhid->outhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC); if 
(!usbhid->out[usbhid->outhead].raw_report) { hid_warn(hid, "output queueing failed\n"); return; } hid_output_report(report, usbhid->out[usbhid->outhead].raw_report); usbhid->out[usbhid->outhead].report = report; usbhid->outhead = head; /* If the queue isn't running, restart it */ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl)) { usbhid_restart_out_queue(usbhid); /* Otherwise see if an earlier request has timed out */ } else if (time_after(jiffies, usbhid->last_out + HZ * 5)) { /* Prevent autosuspend following the unlink */ usb_autopm_get_interface_no_resume(usbhid->intf); /* * Prevent resubmission in case the URB completes * before we can unlink it. We don't want to cancel * the wrong transfer! */ usb_block_urb(usbhid->urbout); /* Drop lock to avoid deadlock if the callback runs */ spin_unlock(&usbhid->lock); usb_unlink_urb(usbhid->urbout); spin_lock(&usbhid->lock); usb_unblock_urb(usbhid->urbout); /* Unlink might have stopped the queue */ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl)) usbhid_restart_out_queue(usbhid); /* Now we can allow autosuspend again */ usb_autopm_put_interface_async(usbhid->intf); } return; } if ((head = (usbhid->ctrlhead + 1) & (HID_CONTROL_FIFO_SIZE - 1)) == usbhid->ctrltail) { hid_warn(hid, "control queue full\n"); return; } if (dir == USB_DIR_OUT) { usbhid->ctrl[usbhid->ctrlhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC); if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) { hid_warn(hid, "control queueing failed\n"); return; } hid_output_report(report, usbhid->ctrl[usbhid->ctrlhead].raw_report); } usbhid->ctrl[usbhid->ctrlhead].report = report; usbhid->ctrl[usbhid->ctrlhead].dir = dir; usbhid->ctrlhead = head; /* If the queue isn't running, restart it */ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) { usbhid_restart_ctrl_queue(usbhid); /* Otherwise see if an earlier request has timed out */ } else if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) { /* Prevent autosuspend following the unlink */ usb_autopm_get_interface_no_resume(usbhid->intf); /* * Prevent resubmission in case the URB completes * before we can unlink it. We don't want to cancel * the wrong transfer! 
*/ usb_block_urb(usbhid->urbctrl); /* Drop lock to avoid deadlock if the callback runs */ spin_unlock(&usbhid->lock); usb_unlink_urb(usbhid->urbctrl); spin_lock(&usbhid->lock); usb_unblock_urb(usbhid->urbctrl); /* Unlink might have stopped the queue */ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) usbhid_restart_ctrl_queue(usbhid); /* Now we can allow autosuspend again */ usb_autopm_put_interface_async(usbhid->intf); } } static void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, unsigned char dir) { struct usbhid_device *usbhid = hid->driver_data; unsigned long flags; spin_lock_irqsave(&usbhid->lock, flags); __usbhid_submit_report(hid, report, dir); spin_unlock_irqrestore(&usbhid->lock, flags); } static int usbhid_wait_io(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; if (!wait_event_timeout(usbhid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) && !test_bit(HID_OUT_RUNNING, &usbhid->iofl)), 10*HZ)) { dbg_hid("timeout waiting for ctrl or out queue to clear\n"); return -1; } return 0; } static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle) { return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), HID_REQ_SET_IDLE, USB_TYPE_CLASS | USB_RECIP_INTERFACE, (idle << 8) | report, ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int hid_get_class_descriptor(struct usb_device *dev, int ifnum, unsigned char type, void *buf, int size) { int result, retries = 4; memset(buf, 0, size); do { result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_RECIP_INTERFACE | USB_DIR_IN, (type << 8), ifnum, buf, size, USB_CTRL_GET_TIMEOUT); retries--; } while (result < size && retries); return result; } static int usbhid_open(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; int res; mutex_lock(&usbhid->mutex); set_bit(HID_OPENED, &usbhid->iofl); if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { res = 0; goto Done; } res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ if (res < 0) { clear_bit(HID_OPENED, &usbhid->iofl); res = -EIO; goto Done; } usbhid->intf->needs_remote_wakeup = 1; set_bit(HID_RESUME_RUNNING, &usbhid->iofl); set_bit(HID_IN_POLLING, &usbhid->iofl); res = hid_start_in(hid); if (res) { if (res != -ENOSPC) { hid_io_error(hid); res = 0; } else { /* no use opening if resources are insufficient */ res = -EBUSY; clear_bit(HID_OPENED, &usbhid->iofl); clear_bit(HID_IN_POLLING, &usbhid->iofl); usbhid->intf->needs_remote_wakeup = 0; } } usb_autopm_put_interface(usbhid->intf); /* * In case events are generated while nobody was listening, * some are released when the device is re-opened. * Wait 50 msec for the queue to empty before allowing events * to go through hid. */ if (res == 0) msleep(50); clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); Done: mutex_unlock(&usbhid->mutex); return res; } static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; mutex_lock(&usbhid->mutex); /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing * with hid_start_in(). 
*/ spin_lock_irq(&usbhid->lock); clear_bit(HID_OPENED, &usbhid->iofl); if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { hid_cancel_delayed_stuff(usbhid); usb_kill_urb(usbhid->urbin); usbhid->intf->needs_remote_wakeup = 0; } mutex_unlock(&usbhid->mutex); } /* * Initialize all reports */ void usbhid_init_reports(struct hid_device *hid) { struct hid_report *report; struct usbhid_device *usbhid = hid->driver_data; struct hid_report_enum *report_enum; int err, ret; report_enum = &hid->report_enum[HID_INPUT_REPORT]; list_for_each_entry(report, &report_enum->report_list, list) usbhid_submit_report(hid, report, USB_DIR_IN); report_enum = &hid->report_enum[HID_FEATURE_REPORT]; list_for_each_entry(report, &report_enum->report_list, list) usbhid_submit_report(hid, report, USB_DIR_IN); err = 0; ret = usbhid_wait_io(hid); while (ret) { err |= ret; if (test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) usb_kill_urb(usbhid->urbctrl); if (test_bit(HID_OUT_RUNNING, &usbhid->iofl)) usb_kill_urb(usbhid->urbout); ret = usbhid_wait_io(hid); } if (err) hid_warn(hid, "timeout initializing reports\n"); } /* * Reset LEDs which BIOS might have left on. For now, just NumLock (0x01). */ static int hid_find_field_early(struct hid_device *hid, unsigned int page, unsigned int hid_code, struct hid_field **pfield) { struct hid_report *report; struct hid_field *field; struct hid_usage *usage; int i, j; list_for_each_entry(report, &hid->report_enum[HID_OUTPUT_REPORT].report_list, list) { for (i = 0; i < report->maxfield; i++) { field = report->field[i]; for (j = 0; j < field->maxusage; j++) { usage = &field->usage[j]; if ((usage->hid & HID_USAGE_PAGE) == page && (usage->hid & 0xFFFF) == hid_code) { *pfield = field; return j; } } } } return -1; } static void usbhid_set_leds(struct hid_device *hid) { struct hid_field *field; int offset; if ((offset = hid_find_field_early(hid, HID_UP_LED, 0x01, &field)) != -1) { hid_set_field(field, offset, 0); usbhid_submit_report(hid, field->report, USB_DIR_OUT); } } /* * Traverse the supplied list of reports and find the longest */ static void hid_find_max_report(struct hid_device *hid, unsigned int type, unsigned int *max) { struct hid_report *report; unsigned int size; list_for_each_entry(report, &hid->report_enum[type].report_list, list) { size = ((report->size - 1) >> 3) + 1 + hid->report_enum[type].numbered; if (*max < size) *max = size; } } static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; usbhid->inbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL, &usbhid->inbuf_dma); usbhid->outbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL, &usbhid->outbuf_dma); usbhid->cr = kmalloc(sizeof(*usbhid->cr), GFP_KERNEL); usbhid->ctrlbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL, &usbhid->ctrlbuf_dma); if (!usbhid->inbuf || !usbhid->outbuf || !usbhid->cr || !usbhid->ctrlbuf) return -1; return 0; } static int usbhid_get_raw_report(struct hid_device *hid, unsigned char report_number, __u8 *buf, size_t count, unsigned char report_type) { struct usbhid_device *usbhid = hid->driver_data; struct usb_device *dev = hid_to_usb_dev(hid); struct usb_interface *intf = usbhid->intf; struct usb_host_interface *interface = intf->cur_altsetting; int skipped_report_id = 0; int ret; /* Byte 0 is the report number. 
Report data starts at byte 1.*/ buf[0] = report_number; if (report_number == 0x0) { /* Offset the return buffer by 1, so that the report ID will remain in byte 0. */ buf++; count--; skipped_report_id = 1; } ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), HID_REQ_GET_REPORT, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, ((report_type + 1) << 8) | report_number, interface->desc.bInterfaceNumber, buf, count, USB_CTRL_SET_TIMEOUT); /* count also the report id */ if (ret > 0 && skipped_report_id) ret++; return ret; } static int usbhid_set_raw_report(struct hid_device *hid, unsigned int reportnum, __u8 *buf, size_t count, unsigned char rtype) { struct usbhid_device *usbhid = hid->driver_data; struct usb_device *dev = hid_to_usb_dev(hid); struct usb_interface *intf = usbhid->intf; struct usb_host_interface *interface = intf->cur_altsetting; int ret, skipped_report_id = 0; /* Byte 0 is the report number. Report data starts at byte 1.*/ if ((rtype == HID_OUTPUT_REPORT) && (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORT_ID)) buf[0] = 0; else buf[0] = reportnum; if (buf[0] == 0x0) { /* Don't send the Report ID */ buf++; count--; skipped_report_id = 1; } ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, ((rtype + 1) << 8) | reportnum, interface->desc.bInterfaceNumber, buf, count, USB_CTRL_SET_TIMEOUT); /* count also the report id, if this was a numbered report. */ if (ret > 0 && skipped_report_id) ret++; return ret; } static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count) { struct usbhid_device *usbhid = hid->driver_data; struct usb_device *dev = hid_to_usb_dev(hid); int actual_length, skipped_report_id = 0, ret; if (!usbhid->urbout) return -ENOSYS; if (buf[0] == 0x0) { /* Don't send the Report ID */ buf++; count--; skipped_report_id = 1; } ret = usb_interrupt_msg(dev, usbhid->urbout->pipe, buf, count, &actual_length, USB_CTRL_SET_TIMEOUT); /* return the number of bytes transferred */ if (ret == 0) { ret = actual_length; /* count also the report id */ if (skipped_report_id) ret++; } return ret; } static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; usb_free_coherent(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma); usb_free_coherent(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma); kfree(usbhid->cr); usb_free_coherent(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma); } static int usbhid_parse(struct hid_device *hid) { struct usb_interface *intf = to_usb_interface(hid->dev.parent); struct usb_host_interface *interface = intf->cur_altsetting; struct usb_device *dev = interface_to_usbdev (intf); struct hid_descriptor *hdesc; struct hid_class_descriptor *hcdesc; u32 quirks = 0; unsigned int rsize = 0; char *rdesc; int ret; quirks = hid_lookup_quirk(hid); if (quirks & HID_QUIRK_IGNORE) return -ENODEV; /* Many keyboards and mice don't like to be polled for reports, * so we will always set the HID_QUIRK_NOGET flag for them. 
*/ if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) { if (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD || interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) quirks |= HID_QUIRK_NOGET; } if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && (!interface->desc.bNumEndpoints || usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { dbg_hid("class descriptor not present\n"); return -ENODEV; } if (!hdesc->bNumDescriptors || hdesc->bLength != sizeof(*hdesc) + (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) { dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n", hdesc->bLength, hdesc->bNumDescriptors); return -EINVAL; } hid->version = le16_to_cpu(hdesc->bcdHID); hid->country = hdesc->bCountryCode; if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT) rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength); if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) { dbg_hid("weird size of report descriptor (%u)\n", rsize); return -EINVAL; } rdesc = kmalloc(rsize, GFP_KERNEL); if (!rdesc) return -ENOMEM; hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0); ret = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber, HID_DT_REPORT, rdesc, rsize); if (ret < 0) { dbg_hid("reading report descriptor failed\n"); kfree(rdesc); goto err; } ret = hid_parse_report(hid, rdesc, rsize); kfree(rdesc); if (ret) { dbg_hid("parsing report descriptor failed\n"); goto err; } if (hdesc->bNumDescriptors > 1) hid_warn(intf, "%u unsupported optional hid class descriptors\n", (int)(hdesc->bNumDescriptors - 1)); hid->quirks |= quirks; return 0; err: return ret; } static int usbhid_start(struct hid_device *hid) { struct usb_interface *intf = to_usb_interface(hid->dev.parent); struct usb_host_interface *interface = intf->cur_altsetting; struct usb_device *dev = interface_to_usbdev(intf); struct usbhid_device *usbhid = hid->driver_data; unsigned int n, insize = 0; int ret; mutex_lock(&usbhid->mutex); clear_bit(HID_DISCONNECTED, &usbhid->iofl); usbhid->bufsize = HID_MIN_BUFFER_SIZE; hid_find_max_report(hid, HID_INPUT_REPORT, &usbhid->bufsize); hid_find_max_report(hid, HID_OUTPUT_REPORT, &usbhid->bufsize); hid_find_max_report(hid, HID_FEATURE_REPORT, &usbhid->bufsize); if (usbhid->bufsize > HID_MAX_BUFFER_SIZE) usbhid->bufsize = HID_MAX_BUFFER_SIZE; hid_find_max_report(hid, HID_INPUT_REPORT, &insize); if (insize > HID_MAX_BUFFER_SIZE) insize = HID_MAX_BUFFER_SIZE; if (hid_alloc_buffers(dev, hid)) { ret = -ENOMEM; goto fail; } for (n = 0; n < interface->desc.bNumEndpoints; n++) { struct usb_endpoint_descriptor *endpoint; int pipe; int interval; endpoint = &interface->endpoint[n].desc; if (!usb_endpoint_xfer_int(endpoint)) continue; interval = endpoint->bInterval; /* Some vendors give fullspeed interval on highspeed devices */ if (hid->quirks & HID_QUIRK_FULLSPEED_INTERVAL && dev->speed == USB_SPEED_HIGH) { interval = fls(endpoint->bInterval*8); pr_info("%s: Fixing fullspeed to highspeed interval: %d -> %d\n", hid->name, endpoint->bInterval, interval); } /* Change the polling interval of mice, joysticks * and keyboards. 
*/ switch (hid->collection->usage) { case HID_GD_MOUSE: if (hid_mousepoll_interval > 0) interval = hid_mousepoll_interval; break; case HID_GD_JOYSTICK: if (hid_jspoll_interval > 0) interval = hid_jspoll_interval; break; case HID_GD_KEYBOARD: if (hid_kbpoll_interval > 0) interval = hid_kbpoll_interval; break; } ret = -ENOMEM; if (usb_endpoint_dir_in(endpoint)) { if (usbhid->urbin) continue; if (!(usbhid->urbin = usb_alloc_urb(0, GFP_KERNEL))) goto fail; pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); usb_fill_int_urb(usbhid->urbin, dev, pipe, usbhid->inbuf, insize, hid_irq_in, hid, interval); usbhid->urbin->transfer_dma = usbhid->inbuf_dma; usbhid->urbin->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } else { if (usbhid->urbout) continue; if (!(usbhid->urbout = usb_alloc_urb(0, GFP_KERNEL))) goto fail; pipe = usb_sndintpipe(dev, endpoint->bEndpointAddress); usb_fill_int_urb(usbhid->urbout, dev, pipe, usbhid->outbuf, 0, hid_irq_out, hid, interval); usbhid->urbout->transfer_dma = usbhid->outbuf_dma; usbhid->urbout->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } } usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); if (!usbhid->urbctrl) { ret = -ENOMEM; goto fail; } usb_fill_control_urb(usbhid->urbctrl, dev, 0, (void *) usbhid->cr, usbhid->ctrlbuf, 1, hid_ctrl, hid); usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; usbhid->urbctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; set_bit(HID_STARTED, &usbhid->iofl); if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { ret = usb_autopm_get_interface(usbhid->intf); if (ret) goto fail; set_bit(HID_IN_POLLING, &usbhid->iofl); usbhid->intf->needs_remote_wakeup = 1; ret = hid_start_in(hid); if (ret) { dev_err(&hid->dev, "failed to start in urb: %d\n", ret); } usb_autopm_put_interface(usbhid->intf); } /* Some keyboards don't work until their LEDs have been set. * Since BIOSes do set the LEDs, it must be safe for any device * that supports the keyboard boot protocol. * In addition, enable remote wakeup by default for all keyboard * devices supporting the boot protocol. 
*/ if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT && interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD) { usbhid_set_leds(hid); device_set_wakeup_enable(&dev->dev, 1); } mutex_unlock(&usbhid->mutex); return 0; fail: usb_free_urb(usbhid->urbin); usb_free_urb(usbhid->urbout); usb_free_urb(usbhid->urbctrl); usbhid->urbin = NULL; usbhid->urbout = NULL; usbhid->urbctrl = NULL; hid_free_buffers(dev, hid); mutex_unlock(&usbhid->mutex); return ret; } static void usbhid_stop(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; if (WARN_ON(!usbhid)) return; if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { clear_bit(HID_IN_POLLING, &usbhid->iofl); usbhid->intf->needs_remote_wakeup = 0; } mutex_lock(&usbhid->mutex); clear_bit(HID_STARTED, &usbhid->iofl); spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); while (usbhid->ctrltail != usbhid->ctrlhead) { if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) { kfree(usbhid->ctrl[usbhid->ctrltail].raw_report); usbhid->ctrl[usbhid->ctrltail].raw_report = NULL; } usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1); } spin_unlock_irq(&usbhid->lock); usb_kill_urb(usbhid->urbin); usb_kill_urb(usbhid->urbout); usb_kill_urb(usbhid->urbctrl); hid_cancel_delayed_stuff(usbhid); hid->claimed = 0; usb_free_urb(usbhid->urbin); usb_free_urb(usbhid->urbctrl); usb_free_urb(usbhid->urbout); usbhid->urbin = NULL; /* don't mess up next start */ usbhid->urbctrl = NULL; usbhid->urbout = NULL; hid_free_buffers(hid_to_usb_dev(hid), hid); mutex_unlock(&usbhid->mutex); } static int usbhid_power(struct hid_device *hid, int lvl) { struct usbhid_device *usbhid = hid->driver_data; int r = 0; switch (lvl) { case PM_HINT_FULLON: r = usb_autopm_get_interface(usbhid->intf); break; case PM_HINT_NORMAL: usb_autopm_put_interface(usbhid->intf); break; } return r; } static void usbhid_request(struct hid_device *hid, struct hid_report *rep, int reqtype) { switch (reqtype) { case HID_REQ_GET_REPORT: usbhid_submit_report(hid, rep, USB_DIR_IN); break; case HID_REQ_SET_REPORT: usbhid_submit_report(hid, rep, USB_DIR_OUT); break; } } static int usbhid_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { switch (reqtype) { case HID_REQ_GET_REPORT: return usbhid_get_raw_report(hid, reportnum, buf, len, rtype); case HID_REQ_SET_REPORT: return usbhid_set_raw_report(hid, reportnum, buf, len, rtype); default: return -EIO; } } static int usbhid_idle(struct hid_device *hid, int report, int idle, int reqtype) { struct usb_device *dev = hid_to_usb_dev(hid); struct usb_interface *intf = to_usb_interface(hid->dev.parent); struct usb_host_interface *interface = intf->cur_altsetting; int ifnum = interface->desc.bInterfaceNumber; if (reqtype != HID_REQ_SET_IDLE) return -EINVAL; return hid_set_idle(dev, ifnum, report, idle); } static bool usbhid_may_wakeup(struct hid_device *hid) { struct usb_device *dev = hid_to_usb_dev(hid); return device_may_wakeup(&dev->dev); } static const struct hid_ll_driver usb_hid_driver = { .parse = usbhid_parse, .start = usbhid_start, .stop = usbhid_stop, .open = usbhid_open, .close = usbhid_close, .power = usbhid_power, .request = usbhid_request, .wait = usbhid_wait_io, .raw_request = usbhid_raw_request, .output_report = usbhid_output_report, .idle = usbhid_idle, .may_wakeup = usbhid_may_wakeup, }; bool hid_is_usb(const struct hid_device *hdev) { return hdev->ll_driver == 
&usb_hid_driver; } EXPORT_SYMBOL_GPL(hid_is_usb); static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_interface *interface = intf->cur_altsetting; struct usb_device *dev = interface_to_usbdev(intf); struct usbhid_device *usbhid; struct hid_device *hid; unsigned int n, has_in = 0; size_t len; int ret; dbg_hid("HID probe called for ifnum %d\n", intf->altsetting->desc.bInterfaceNumber); for (n = 0; n < interface->desc.bNumEndpoints; n++) if (usb_endpoint_is_int_in(&interface->endpoint[n].desc)) has_in++; if (!has_in) { hid_err(intf, "couldn't find an input interrupt endpoint\n"); return -ENODEV; } hid = hid_allocate_device(); if (IS_ERR(hid)) return PTR_ERR(hid); usb_set_intfdata(intf, hid); hid->ll_driver = &usb_hid_driver; hid->ff_init = hid_pidff_init; #ifdef CONFIG_USB_HIDDEV hid->hiddev_connect = hiddev_connect; hid->hiddev_disconnect = hiddev_disconnect; hid->hiddev_hid_event = hiddev_hid_event; hid->hiddev_report_event = hiddev_report_event; #endif hid->dev.parent = &intf->dev; device_set_node(&hid->dev, dev_fwnode(&intf->dev)); hid->bus = BUS_USB; hid->vendor = le16_to_cpu(dev->descriptor.idVendor); hid->product = le16_to_cpu(dev->descriptor.idProduct); hid->version = le16_to_cpu(dev->descriptor.bcdDevice); hid->name[0] = 0; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) hid->type = HID_TYPE_USBMOUSE; else if (intf->cur_altsetting->desc.bInterfaceProtocol == 0) hid->type = HID_TYPE_USBNONE; if (dev->manufacturer) strscpy(hid->name, dev->manufacturer, sizeof(hid->name)); if (dev->product) { if (dev->manufacturer) strlcat(hid->name, " ", sizeof(hid->name)); strlcat(hid->name, dev->product, sizeof(hid->name)); } if (!strlen(hid->name)) snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); usb_make_path(dev, hid->phys, sizeof(hid->phys)); strlcat(hid->phys, "/input", sizeof(hid->phys)); len = strlen(hid->phys); if (len < sizeof(hid->phys) - 1) snprintf(hid->phys + len, sizeof(hid->phys) - len, "%d", intf->altsetting[0].desc.bInterfaceNumber); if (usb_string(dev, dev->descriptor.iSerialNumber, hid->uniq, 64) <= 0) hid->uniq[0] = 0; usbhid = kzalloc(sizeof(*usbhid), GFP_KERNEL); if (usbhid == NULL) { ret = -ENOMEM; goto err; } hid->driver_data = usbhid; usbhid->hid = hid; usbhid->intf = intf; usbhid->ifnum = interface->desc.bInterfaceNumber; init_waitqueue_head(&usbhid->wait); INIT_WORK(&usbhid->reset_work, hid_reset); timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); spin_lock_init(&usbhid->lock); mutex_init(&usbhid->mutex); ret = hid_add_device(hid); if (ret) { if (ret != -ENODEV) hid_err(intf, "can't add hid device: %d\n", ret); goto err_free; } return 0; err_free: kfree(usbhid); err: hid_destroy_device(hid); return ret; } static void usbhid_disconnect(struct usb_interface *intf) { struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid; if (WARN_ON(!hid)) return; usbhid = hid->driver_data; spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); hid_destroy_device(hid); kfree(usbhid); } static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid) { timer_delete_sync(&usbhid->io_retry); cancel_work_sync(&usbhid->reset_work); } static void hid_cease_io(struct usbhid_device *usbhid) { timer_delete_sync(&usbhid->io_retry); usb_kill_urb(usbhid->urbin); usb_kill_urb(usbhid->urbctrl); 
usb_kill_urb(usbhid->urbout); } static void hid_restart_io(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl); int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl); spin_lock_irq(&usbhid->lock); clear_bit(HID_SUSPENDED, &usbhid->iofl); usbhid_mark_busy(usbhid); if (clear_halt || reset_pending) schedule_work(&usbhid->reset_work); usbhid->retry_delay = 0; spin_unlock_irq(&usbhid->lock); if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl)) return; if (!clear_halt) { if (hid_start_in(hid) < 0) hid_io_error(hid); } spin_lock_irq(&usbhid->lock); if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl)) usbhid_restart_out_queue(usbhid); if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) usbhid_restart_ctrl_queue(usbhid); spin_unlock_irq(&usbhid->lock); } /* Treat USB reset pretty much the same as suspend/resume */ static int hid_pre_reset(struct usb_interface *intf) { struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid = hid->driver_data; spin_lock_irq(&usbhid->lock); set_bit(HID_RESET_PENDING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); hid_cease_io(usbhid); return 0; } /* Same routine used for post_reset and reset_resume */ static int hid_post_reset(struct usb_interface *intf) { struct usb_device *dev = interface_to_usbdev (intf); struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid = hid->driver_data; struct usb_host_interface *interface = intf->cur_altsetting; int status; char *rdesc; /* Fetch and examine the HID report descriptor. If this * has changed, then rebind. Since usbcore's check of the * configuration descriptors passed, we already know that * the size of the HID report descriptor has not changed. 
*/ rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL); if (!rdesc) return -ENOMEM; status = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber, HID_DT_REPORT, rdesc, hid->dev_rsize); if (status < 0) { dbg_hid("reading report descriptor failed (post_reset)\n"); kfree(rdesc); return status; } status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize); kfree(rdesc); if (status != 0) { dbg_hid("report descriptor changed\n"); return -EPERM; } /* No need to do another reset or clear a halted endpoint */ spin_lock_irq(&usbhid->lock); clear_bit(HID_RESET_PENDING, &usbhid->iofl); clear_bit(HID_CLEAR_HALT, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0); hid_restart_io(hid); return 0; } static int hid_resume_common(struct hid_device *hid, bool driver_suspended) { int status = 0; hid_restart_io(hid); if (driver_suspended) status = hid_driver_resume(hid); return status; } static int hid_suspend(struct usb_interface *intf, pm_message_t message) { struct hid_device *hid = usb_get_intfdata(intf); struct usbhid_device *usbhid = hid->driver_data; int status = 0; bool driver_suspended = false; unsigned int ledcount; if (PMSG_IS_AUTO(message)) { ledcount = hidinput_count_leds(hid); spin_lock_irq(&usbhid->lock); /* Sync with error handler */ if (!test_bit(HID_RESET_PENDING, &usbhid->iofl) && !test_bit(HID_CLEAR_HALT, &usbhid->iofl) && !test_bit(HID_OUT_RUNNING, &usbhid->iofl) && !test_bit(HID_CTRL_RUNNING, &usbhid->iofl) && !test_bit(HID_KEYS_PRESSED, &usbhid->iofl) && (!ledcount || ignoreled)) { set_bit(HID_SUSPENDED, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); status = hid_driver_suspend(hid, message); if (status < 0) goto failed; driver_suspended = true; } else { usbhid_mark_busy(usbhid); spin_unlock_irq(&usbhid->lock); return -EBUSY; } } else { /* TODO: resume() might need to handle suspend failure */ status = hid_driver_suspend(hid, message); driver_suspended = true; spin_lock_irq(&usbhid->lock); set_bit(HID_SUSPENDED, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); if (usbhid_wait_io(hid) < 0) status = -EIO; } hid_cancel_delayed_stuff(usbhid); hid_cease_io(usbhid); if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) { /* lost race against keypresses */ status = -EBUSY; goto failed; } dev_dbg(&intf->dev, "suspend\n"); return status; failed: hid_resume_common(hid, driver_suspended); return status; } static int hid_resume(struct usb_interface *intf) { struct hid_device *hid = usb_get_intfdata (intf); int status; status = hid_resume_common(hid, true); dev_dbg(&intf->dev, "resume status %d\n", status); return 0; } static int hid_reset_resume(struct usb_interface *intf) { struct hid_device *hid = usb_get_intfdata(intf); int status; status = hid_post_reset(intf); if (status >= 0) { int ret = hid_driver_reset_resume(hid); if (ret < 0) status = ret; } return status; } static const struct usb_device_id hid_usb_ids[] = { { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, .bInterfaceClass = USB_INTERFACE_CLASS_HID }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, hid_usb_ids); static struct usb_driver hid_driver = { .name = "usbhid", .probe = usbhid_probe, .disconnect = usbhid_disconnect, .suspend = pm_ptr(hid_suspend), .resume = pm_ptr(hid_resume), .reset_resume = pm_ptr(hid_reset_resume), .pre_reset = hid_pre_reset, .post_reset = hid_post_reset, .id_table = hid_usb_ids, .supports_autosuspend = 1, }; struct usb_interface *usbhid_find_interface(int minor) { return usb_find_interface(&hid_driver, minor); } static int 
__init hid_init(void) { int retval; retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS); if (retval) goto usbhid_quirks_init_fail; retval = usb_register(&hid_driver); if (retval) goto usb_register_fail; pr_info(KBUILD_MODNAME ": " DRIVER_DESC "\n"); return 0; usb_register_fail: hid_quirks_exit(BUS_USB); usbhid_quirks_init_fail: return retval; } static void __exit hid_exit(void) { usb_deregister(&hid_driver); hid_quirks_exit(BUS_USB); } module_init(hid_init); module_exit(hid_exit); MODULE_AUTHOR("Andreas Gal"); MODULE_AUTHOR("Vojtech Pavlik"); MODULE_AUTHOR("Jiri Kosina"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
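/*
 * Illustrative sketch, not part of the driver above: a minimal hid_driver
 * probe that uses the exported hid_is_usb() helper before touching the USB
 * transport. Only hid_is_usb(), to_usb_interface() and interface_to_usbdev()
 * come from the code above; example_probe() and the surrounding driver are
 * hypothetical.
 */
static int example_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	struct usb_interface *intf;
	struct usb_device *udev;
	int ret;

	if (!hid_is_usb(hdev))
		return -ENODEV;	/* this hypothetical driver only handles the USB transport */

	intf = to_usb_interface(hdev->dev.parent);
	udev = interface_to_usbdev(intf);
	dev_dbg(&hdev->dev, "probing USB HID %04x:%04x\n",
		le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct));

	ret = hid_parse(hdev);
	if (ret)
		return ret;
	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}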
//
SPDX-License-Identifier: GPL-2.0-only /* * Intel La Jolla Cove Adapter USB driver * * Copyright (c) 2023, Intel Corporation. */ #include <linux/acpi.h> #include <linux/auxiliary_bus.h> #include <linux/dev_printk.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/usb.h> #include <linux/usb/ljca.h> #include <linux/unaligned.h> /* command flags */ #define LJCA_ACK_FLAG BIT(0) #define LJCA_RESP_FLAG BIT(1) #define LJCA_CMPL_FLAG BIT(2) #define LJCA_MAX_PACKET_SIZE 64u #define LJCA_MAX_PAYLOAD_SIZE \ (LJCA_MAX_PACKET_SIZE - sizeof(struct ljca_msg)) #define LJCA_WRITE_TIMEOUT_MS 200 #define LJCA_WRITE_ACK_TIMEOUT_MS 500 #define LJCA_ENUM_CLIENT_TIMEOUT_MS 20 /* ljca client type */ enum ljca_client_type { LJCA_CLIENT_MNG = 1, LJCA_CLIENT_GPIO = 3, LJCA_CLIENT_I2C = 4, LJCA_CLIENT_SPI = 5, }; /* MNG client commands */ enum ljca_mng_cmd { LJCA_MNG_RESET = 2, LJCA_MNG_ENUM_GPIO = 4, LJCA_MNG_ENUM_I2C = 5, LJCA_MNG_ENUM_SPI = 8, }; /* ljca client acpi _ADR */ enum ljca_client_acpi_adr { LJCA_GPIO_ACPI_ADR, LJCA_I2C1_ACPI_ADR, LJCA_I2C2_ACPI_ADR, LJCA_SPI1_ACPI_ADR, LJCA_SPI2_ACPI_ADR, LJCA_CLIENT_ACPI_ADR_MAX, }; /* ljca cmd message structure */ struct ljca_msg { u8 type; u8 cmd; u8 flags; u8 len; u8 data[] __counted_by(len); } __packed; struct ljca_i2c_ctr_info { u8 id; u8 capacity; u8 intr_pin; } __packed; struct ljca_i2c_descriptor { u8 num; struct ljca_i2c_ctr_info info[] __counted_by(num); } __packed; struct ljca_spi_ctr_info { u8 id; u8 capacity; u8 intr_pin; } __packed; struct ljca_spi_descriptor { u8 num; struct ljca_spi_ctr_info info[] __counted_by(num); } __packed; struct ljca_bank_descriptor { u8 bank_id; u8 pin_num; /* 1 bit for each gpio, 1 means valid */ __le32 valid_pins; } __packed; struct ljca_gpio_descriptor { u8 pins_per_bank; u8 bank_num; struct ljca_bank_descriptor bank_desc[] __counted_by(bank_num); } __packed; /** * struct ljca_adapter - represent a ljca adapter * * @intf: the usb interface for this ljca adapter * @usb_dev: the usb device for this ljca adapter * @dev: the specific device info of the usb interface * @rx_pipe: bulk in pipe for receive data from firmware * @tx_pipe: bulk out pipe for send data to firmware * @rx_urb: urb used for the bulk in pipe * @rx_buf: buffer used to receive command response and event * @rx_len: length of rx buffer * @ex_buf: external buffer to save command response * @ex_buf_len: length of external buffer * @actual_length: actual length of data copied to external buffer * @tx_buf: buffer used to download command to firmware * @tx_buf_len: length of tx buffer * @lock: spinlock to protect tx_buf and ex_buf * @cmd_completion: completion object as the command receives ack * @mutex: mutex to avoid command download concurrently * @client_list: client device list * @disconnect: usb disconnect ongoing or not * @reset_id: used to reset firmware */ struct ljca_adapter { struct usb_interface *intf; struct usb_device *usb_dev; struct device *dev; unsigned int rx_pipe; unsigned int tx_pipe; struct urb *rx_urb; void *rx_buf; unsigned int rx_len; u8 *ex_buf; u8 ex_buf_len; u8 actual_length; void *tx_buf; u8 tx_buf_len; spinlock_t lock; struct completion cmd_completion; struct mutex mutex; struct list_head client_list; bool disconnect; u32 reset_id; }; struct ljca_match_ids_walk_data { const struct acpi_device_id *ids; const char *uid; struct acpi_device *adev; }; static const struct acpi_device_id 
ljca_gpio_hids[] = { { "INTC1074" }, { "INTC1096" }, { "INTC100B" }, { "INTC10D1" }, { "INTC10B5" }, {}, }; static const struct acpi_device_id ljca_i2c_hids[] = { { "INTC1075" }, { "INTC1097" }, { "INTC100C" }, { "INTC10D2" }, {}, }; static const struct acpi_device_id ljca_spi_hids[] = { { "INTC1091" }, { "INTC1098" }, { "INTC100D" }, { "INTC10D3" }, {}, }; static void ljca_handle_event(struct ljca_adapter *adap, struct ljca_msg *header) { struct ljca_client *client; list_for_each_entry(client, &adap->client_list, link) { /* * Currently only GPIO register event callback, but * firmware message structure should include id when * multiple same type clients register event callback. */ if (client->type == header->type) { unsigned long flags; spin_lock_irqsave(&client->event_cb_lock, flags); client->event_cb(client->context, header->cmd, header->data, header->len); spin_unlock_irqrestore(&client->event_cb_lock, flags); break; } } } /* process command ack and received data if available */ static void ljca_handle_cmd_ack(struct ljca_adapter *adap, struct ljca_msg *header) { struct ljca_msg *tx_header = adap->tx_buf; u8 ibuf_len, actual_len = 0; unsigned long flags; u8 *ibuf; spin_lock_irqsave(&adap->lock, flags); if (tx_header->type != header->type || tx_header->cmd != header->cmd) { spin_unlock_irqrestore(&adap->lock, flags); dev_err(adap->dev, "cmd ack mismatch error\n"); return; } ibuf_len = adap->ex_buf_len; ibuf = adap->ex_buf; if (ibuf && ibuf_len) { actual_len = min(header->len, ibuf_len); /* copy received data to external buffer */ memcpy(ibuf, header->data, actual_len); } /* update copied data length */ adap->actual_length = actual_len; spin_unlock_irqrestore(&adap->lock, flags); complete(&adap->cmd_completion); } static void ljca_recv(struct urb *urb) { struct ljca_msg *header = urb->transfer_buffer; struct ljca_adapter *adap = urb->context; int ret; switch (urb->status) { case 0: /* success */ break; case -ENOENT: /* * directly complete the possible ongoing transfer * during disconnect */ if (adap->disconnect) complete(&adap->cmd_completion); return; case -ECONNRESET: case -ESHUTDOWN: case -EPIPE: /* rx urb is terminated */ dev_dbg(adap->dev, "rx urb terminated with status: %d\n", urb->status); return; default: dev_dbg(adap->dev, "rx urb error: %d\n", urb->status); goto resubmit; } if (header->len + sizeof(*header) != urb->actual_length) goto resubmit; if (header->flags & LJCA_ACK_FLAG) ljca_handle_cmd_ack(adap, header); else ljca_handle_event(adap, header); resubmit: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret && ret != -EPERM) dev_err(adap->dev, "resubmit rx urb error %d\n", ret); } static int ljca_send(struct ljca_adapter *adap, u8 type, u8 cmd, const u8 *obuf, u8 obuf_len, u8 *ibuf, u8 ibuf_len, bool ack, unsigned long timeout) { unsigned int msg_len = sizeof(struct ljca_msg) + obuf_len; struct ljca_msg *header = adap->tx_buf; unsigned int transferred; unsigned long flags; int ret; if (adap->disconnect) return -ENODEV; if (msg_len > adap->tx_buf_len) return -EINVAL; mutex_lock(&adap->mutex); spin_lock_irqsave(&adap->lock, flags); header->type = type; header->cmd = cmd; header->len = obuf_len; if (obuf) memcpy(header->data, obuf, obuf_len); header->flags = LJCA_CMPL_FLAG | (ack ? 
LJCA_ACK_FLAG : 0); adap->ex_buf = ibuf; adap->ex_buf_len = ibuf_len; adap->actual_length = 0; spin_unlock_irqrestore(&adap->lock, flags); reinit_completion(&adap->cmd_completion); ret = usb_autopm_get_interface(adap->intf); if (ret < 0) goto out; ret = usb_bulk_msg(adap->usb_dev, adap->tx_pipe, header, msg_len, &transferred, LJCA_WRITE_TIMEOUT_MS); if (ret < 0) goto out_put; if (transferred != msg_len) { ret = -EIO; goto out_put; } if (ack) { ret = wait_for_completion_timeout(&adap->cmd_completion, timeout); if (!ret) { ret = -ETIMEDOUT; goto out_put; } } ret = adap->actual_length; out_put: usb_autopm_put_interface(adap->intf); out: spin_lock_irqsave(&adap->lock, flags); adap->ex_buf = NULL; adap->ex_buf_len = 0; memset(header, 0, sizeof(*header)); spin_unlock_irqrestore(&adap->lock, flags); mutex_unlock(&adap->mutex); return ret; } int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf, u8 obuf_len, u8 *ibuf, u8 ibuf_len) { return ljca_send(client->adapter, client->type, cmd, obuf, obuf_len, ibuf, ibuf_len, true, LJCA_WRITE_ACK_TIMEOUT_MS); } EXPORT_SYMBOL_NS_GPL(ljca_transfer, "LJCA"); int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf, u8 obuf_len) { return ljca_send(client->adapter, client->type, cmd, obuf, obuf_len, NULL, 0, false, LJCA_WRITE_ACK_TIMEOUT_MS); } EXPORT_SYMBOL_NS_GPL(ljca_transfer_noack, "LJCA"); int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context) { unsigned long flags; if (!event_cb) return -EINVAL; spin_lock_irqsave(&client->event_cb_lock, flags); if (client->event_cb) { spin_unlock_irqrestore(&client->event_cb_lock, flags); return -EALREADY; } client->event_cb = event_cb; client->context = context; spin_unlock_irqrestore(&client->event_cb_lock, flags); return 0; } EXPORT_SYMBOL_NS_GPL(ljca_register_event_cb, "LJCA"); void ljca_unregister_event_cb(struct ljca_client *client) { unsigned long flags; spin_lock_irqsave(&client->event_cb_lock, flags); client->event_cb = NULL; client->context = NULL; spin_unlock_irqrestore(&client->event_cb_lock, flags); } EXPORT_SYMBOL_NS_GPL(ljca_unregister_event_cb, "LJCA"); static int ljca_match_device_ids(struct acpi_device *adev, void *data) { struct ljca_match_ids_walk_data *wd = data; const char *uid = acpi_device_uid(adev); if (acpi_match_device_ids(adev, wd->ids)) return 0; if (!wd->uid) goto match; if (!uid) /* * Some DSDTs have only one ACPI companion for the two I2C * controllers and they don't set a UID at all (e.g. Dell * Latitude 9420). On these platforms only the first I2C * controller is used, so if a HID match has no UID we use * "0" as the UID and assign ACPI companion to the first * I2C controller. */ uid = "0"; else uid = strchr(uid, wd->uid[0]); if (!uid || strcmp(uid, wd->uid)) return 0; match: wd->adev = adev; return 1; } /* bind auxiliary device to acpi device */ static void ljca_auxdev_acpi_bind(struct ljca_adapter *adap, struct auxiliary_device *auxdev, u64 adr, u8 id) { struct ljca_match_ids_walk_data wd = { 0 }; struct device *dev = adap->dev; struct acpi_device *parent; char uid[4]; parent = ACPI_COMPANION(dev); if (!parent) return; /* * Currently LJCA hw doesn't use _ADR instead the shipped * platforms use _HID to distinguish children devices. 
*/ switch (adr) { case LJCA_GPIO_ACPI_ADR: wd.ids = ljca_gpio_hids; break; case LJCA_I2C1_ACPI_ADR: case LJCA_I2C2_ACPI_ADR: snprintf(uid, sizeof(uid), "%d", id); wd.uid = uid; wd.ids = ljca_i2c_hids; break; case LJCA_SPI1_ACPI_ADR: case LJCA_SPI2_ACPI_ADR: wd.ids = ljca_spi_hids; break; default: dev_warn(dev, "unsupported _ADR\n"); return; } acpi_dev_for_each_child(parent, ljca_match_device_ids, &wd); if (wd.adev) { ACPI_COMPANION_SET(&auxdev->dev, wd.adev); return; } parent = ACPI_COMPANION(dev->parent->parent); if (!parent) return; acpi_dev_for_each_child(parent, ljca_match_device_ids, &wd); if (wd.adev) ACPI_COMPANION_SET(&auxdev->dev, wd.adev); } static void ljca_auxdev_release(struct device *dev) { struct auxiliary_device *auxdev = to_auxiliary_dev(dev); kfree(auxdev->dev.platform_data); } static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id, char *name, void *data, u64 adr) { struct auxiliary_device *auxdev; struct ljca_client *client; int ret; client = kzalloc(sizeof *client, GFP_KERNEL); if (!client) { kfree(data); return -ENOMEM; } client->type = type; client->id = id; client->adapter = adap; spin_lock_init(&client->event_cb_lock); auxdev = &client->auxdev; auxdev->name = name; auxdev->id = id; auxdev->dev.parent = adap->dev; auxdev->dev.platform_data = data; auxdev->dev.release = ljca_auxdev_release; ret = auxiliary_device_init(auxdev); if (ret) { kfree(data); goto err_free; } ljca_auxdev_acpi_bind(adap, auxdev, adr, id); ret = auxiliary_device_add(auxdev); if (ret) goto err_uninit; list_add_tail(&client->link, &adap->client_list); return 0; err_uninit: auxiliary_device_uninit(auxdev); err_free: kfree(client); return ret; } static int ljca_enumerate_gpio(struct ljca_adapter *adap) { u32 valid_pin[LJCA_MAX_GPIO_NUM / BITS_PER_TYPE(u32)]; struct ljca_gpio_descriptor *desc; struct ljca_gpio_info *gpio_info; u8 buf[LJCA_MAX_PAYLOAD_SIZE]; int ret, gpio_num; unsigned int i; ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_GPIO, NULL, 0, buf, sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS); if (ret < 0) return ret; /* check firmware response */ desc = (struct ljca_gpio_descriptor *)buf; if (ret != struct_size(desc, bank_desc, desc->bank_num)) return -EINVAL; gpio_num = desc->pins_per_bank * desc->bank_num; if (gpio_num > LJCA_MAX_GPIO_NUM) return -EINVAL; /* construct platform data */ gpio_info = kzalloc(sizeof *gpio_info, GFP_KERNEL); if (!gpio_info) return -ENOMEM; gpio_info->num = gpio_num; for (i = 0; i < desc->bank_num; i++) valid_pin[i] = get_unaligned_le32(&desc->bank_desc[i].valid_pins); bitmap_from_arr32(gpio_info->valid_pin_map, valid_pin, gpio_num); return ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio", gpio_info, LJCA_GPIO_ACPI_ADR); } static int ljca_enumerate_i2c(struct ljca_adapter *adap) { struct ljca_i2c_descriptor *desc; struct ljca_i2c_info *i2c_info; u8 buf[LJCA_MAX_PAYLOAD_SIZE]; unsigned int i; int ret; ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_I2C, NULL, 0, buf, sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS); if (ret < 0) return ret; /* check firmware response */ desc = (struct ljca_i2c_descriptor *)buf; if (ret != struct_size(desc, info, desc->num)) return -EINVAL; for (i = 0; i < desc->num; i++) { /* construct platform data */ i2c_info = kzalloc(sizeof *i2c_info, GFP_KERNEL); if (!i2c_info) return -ENOMEM; i2c_info->id = desc->info[i].id; i2c_info->capacity = desc->info[i].capacity; i2c_info->intr_pin = desc->info[i].intr_pin; ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i, "ljca-i2c", i2c_info, 
LJCA_I2C1_ACPI_ADR + i); if (ret) return ret; } return 0; } static int ljca_enumerate_spi(struct ljca_adapter *adap) { struct ljca_spi_descriptor *desc; struct ljca_spi_info *spi_info; u8 buf[LJCA_MAX_PAYLOAD_SIZE]; unsigned int i; int ret; /* Not all LJCA chips implement SPI, a timeout reading the descriptors is normal */ ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_SPI, NULL, 0, buf, sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS); if (ret < 0) return (ret == -ETIMEDOUT) ? 0 : ret; /* check firmware response */ desc = (struct ljca_spi_descriptor *)buf; if (ret != struct_size(desc, info, desc->num)) return -EINVAL; for (i = 0; i < desc->num; i++) { /* construct platform data */ spi_info = kzalloc(sizeof *spi_info, GFP_KERNEL); if (!spi_info) return -ENOMEM; spi_info->id = desc->info[i].id; spi_info->capacity = desc->info[i].capacity; ret = ljca_new_client_device(adap, LJCA_CLIENT_SPI, i, "ljca-spi", spi_info, LJCA_SPI1_ACPI_ADR + i); if (ret) return ret; } return 0; } static int ljca_reset_handshake(struct ljca_adapter *adap) { __le32 reset_id = cpu_to_le32(adap->reset_id); __le32 reset_id_ret = 0; int ret; adap->reset_id++; ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_RESET, (u8 *)&reset_id, sizeof(__le32), (u8 *)&reset_id_ret, sizeof(__le32), true, LJCA_WRITE_ACK_TIMEOUT_MS); if (ret < 0) return ret; if (reset_id_ret != reset_id) return -EINVAL; return 0; } static int ljca_enumerate_clients(struct ljca_adapter *adap) { struct ljca_client *client, *next; int ret; ret = ljca_reset_handshake(adap); if (ret) goto err_kill; ret = ljca_enumerate_gpio(adap); if (ret) { dev_err(adap->dev, "enumerate GPIO error\n"); goto err_kill; } ret = ljca_enumerate_i2c(adap); if (ret) { dev_err(adap->dev, "enumerate I2C error\n"); goto err_kill; } ret = ljca_enumerate_spi(adap); if (ret) { dev_err(adap->dev, "enumerate SPI error\n"); goto err_kill; } return 0; err_kill: adap->disconnect = true; usb_kill_urb(adap->rx_urb); list_for_each_entry_safe_reverse(client, next, &adap->client_list, link) { auxiliary_device_delete(&client->auxdev); auxiliary_device_uninit(&client->auxdev); list_del_init(&client->link); kfree(client); } return ret; } static int ljca_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(interface); struct usb_host_interface *alt = interface->cur_altsetting; struct usb_endpoint_descriptor *ep_in, *ep_out; struct device *dev = &interface->dev; struct ljca_adapter *adap; int ret; adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL); if (!adap) return -ENOMEM; /* separate tx buffer allocation for alignment */ adap->tx_buf = devm_kzalloc(dev, LJCA_MAX_PACKET_SIZE, GFP_KERNEL); if (!adap->tx_buf) return -ENOMEM; adap->tx_buf_len = LJCA_MAX_PACKET_SIZE; mutex_init(&adap->mutex); spin_lock_init(&adap->lock); init_completion(&adap->cmd_completion); INIT_LIST_HEAD(&adap->client_list); adap->intf = usb_get_intf(interface); adap->usb_dev = usb_dev; adap->dev = dev; /* * find the first bulk in and out endpoints. * ignore any others. 
*/ ret = usb_find_common_endpoints(alt, &ep_in, &ep_out, NULL, NULL); if (ret) { dev_err(dev, "bulk endpoints not found\n"); goto err_put; } adap->rx_pipe = usb_rcvbulkpipe(usb_dev, usb_endpoint_num(ep_in)); adap->tx_pipe = usb_sndbulkpipe(usb_dev, usb_endpoint_num(ep_out)); /* setup rx buffer */ adap->rx_len = usb_endpoint_maxp(ep_in); adap->rx_buf = devm_kzalloc(dev, adap->rx_len, GFP_KERNEL); if (!adap->rx_buf) { ret = -ENOMEM; goto err_put; } /* alloc rx urb */ adap->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!adap->rx_urb) { ret = -ENOMEM; goto err_put; } usb_fill_bulk_urb(adap->rx_urb, usb_dev, adap->rx_pipe, adap->rx_buf, adap->rx_len, ljca_recv, adap); usb_set_intfdata(interface, adap); /* submit rx urb before enumerate clients */ ret = usb_submit_urb(adap->rx_urb, GFP_KERNEL); if (ret) { dev_err(dev, "submit rx urb failed: %d\n", ret); goto err_free; } ret = ljca_enumerate_clients(adap); if (ret) goto err_free; /* * This works around problems with ov2740 initialization on some * Lenovo platforms. The autosuspend delay, has to be smaller than * the delay after setting the reset_gpio line in ov2740_resume(). * Otherwise the sensor randomly fails to initialize. */ pm_runtime_set_autosuspend_delay(&usb_dev->dev, 10); usb_enable_autosuspend(usb_dev); return 0; err_free: usb_free_urb(adap->rx_urb); err_put: usb_put_intf(adap->intf); mutex_destroy(&adap->mutex); return ret; } static void ljca_disconnect(struct usb_interface *interface) { struct ljca_adapter *adap = usb_get_intfdata(interface); struct ljca_client *client, *next; adap->disconnect = true; usb_kill_urb(adap->rx_urb); list_for_each_entry_safe_reverse(client, next, &adap->client_list, link) { auxiliary_device_delete(&client->auxdev); auxiliary_device_uninit(&client->auxdev); list_del_init(&client->link); kfree(client); } usb_free_urb(adap->rx_urb); usb_put_intf(adap->intf); mutex_destroy(&adap->mutex); } static int ljca_suspend(struct usb_interface *interface, pm_message_t message) { struct ljca_adapter *adap = usb_get_intfdata(interface); usb_kill_urb(adap->rx_urb); return 0; } static int ljca_resume(struct usb_interface *interface) { struct ljca_adapter *adap = usb_get_intfdata(interface); return usb_submit_urb(adap->rx_urb, GFP_KERNEL); } static const struct usb_device_id ljca_table[] = { { USB_DEVICE(0x8086, 0x0b63) }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(usb, ljca_table); static struct usb_driver ljca_driver = { .name = "ljca", .id_table = ljca_table, .probe = ljca_probe, .disconnect = ljca_disconnect, .suspend = ljca_suspend, .resume = ljca_resume, .supports_autosuspend = 1, }; module_usb_driver(ljca_driver); MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>"); MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>"); MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>"); MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB driver"); MODULE_LICENSE("GPL"); |
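/*
 * Illustrative sketch, not part of the driver above: how an auxiliary client
 * bound to one of the "ljca-gpio"/"ljca-i2c"/"ljca-spi" devices created above
 * might use the exported transfer/event API. The 0x10 opcode and the
 * example_* names are hypothetical; only ljca_transfer(),
 * ljca_register_event_cb() and ljca_unregister_event_cb() come from this
 * file, and the callback arguments mirror how ljca_handle_event() invokes a
 * registered callback (context, cmd, event data, length).
 */
static void example_event_cb(void *context, u8 cmd, const void *evt_data, int len)
{
	/* called from the rx URB completion path with the client's event_cb_lock held */
	pr_debug("ljca example: event cmd %u, %d bytes\n", cmd, len);
}

static int example_client_init(struct ljca_client *client)
{
	u8 out = 0x01;			/* hypothetical payload */
	u8 in[LJCA_MAX_PAYLOAD_SIZE];
	int ret;

	ret = ljca_register_event_cb(client, example_event_cb, client);
	if (ret)
		return ret;

	/* 0x10 is a made-up opcode; real opcodes live in the client drivers */
	ret = ljca_transfer(client, 0x10, &out, sizeof(out), in, sizeof(in));
	if (ret < 0) {
		ljca_unregister_event_cb(client);
		return ret;
	}

	/* on success ljca_transfer() returns the number of bytes copied to in[] */
	return 0;
}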
// SPDX-License-Identifier: GPL-2.0-or-later /* * SQ905 subdriver * * Copyright (C) 2008, 2009 Adam Baker and Theodore Kilgore */ /* * History and Acknowledgments * * The original Linux driver for SQ905 based cameras was written by * Marcell Lengyel and further developed by many other contributors * and is available from http://sourceforge.net/projects/sqcam/ * * This driver takes advantage of the reverse engineering work done for * that driver and for libgphoto2 but shares no code with them. * * This driver has used as a base the finepix driver and other gspca * based drivers and may still contain code fragments taken from those * drivers. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq905" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define SQ905_CMD_TIMEOUT 500 #define SQ905_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define SQ905_MAX_TRANSFER 0x8000 #define FRAME_HEADER_LEN 64 /* The known modes, or registers. These go in the "value" slot. */ /* 00 is "none" obviously */ #define SQ905_BULK_READ 0x03 /* precedes any bulk read */ #define SQ905_COMMAND 0x06 /* precedes the command codes below */ #define SQ905_PING 0x07 /* when reading an "idling" command */ #define SQ905_READ_DONE 0xc0 /* ack bulk read completed */ /* Any non-zero value in the bottom 2 bits of the 2nd byte of * the ID appears to indicate the camera can do 640*480. If the * LSB of that byte is set the image is just upside down, otherwise * it is rotated 180 degrees. */ #define SQ905_HIRES_MASK 0x00000300 #define SQ905_ORIENTATION_MASK 0x00000100 /* Some command codes. These go in the "index" slot.
*/ #define SQ905_ID 0xf0 /* asks for model string */ #define SQ905_CONFIG 0x20 /* gets photo alloc. table, not used here */ #define SQ905_DATA 0x30 /* accesses photo data, not used here */ #define SQ905_CLEAR 0xa0 /* clear everything */ #define SQ905_CAPTURE_LOW 0x60 /* Starts capture at 160x120 */ #define SQ905_CAPTURE_MED 0x61 /* Starts capture at 320x240 */ #define SQ905_CAPTURE_HIGH 0x62 /* Starts capture at 640x480 (some cams only) */ /* note that the capture command also controls the output dimensions */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ /* * Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; static struct v4l2_pix_format sq905_mode[] = { { 160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* * Send a command to the camera. */ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_COMMAND, index, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_PING, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret); return ret; } return 0; } /* * Acknowledge the end of a frame - see warning on sq905_command. */ static int sq905_ack_frame(struct gspca_dev *gspca_dev) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * request and read a block of data - see warning on sq905_command. */ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; int act_len = 0; gspca_dev->usb_buf[0] = '\0'; if (need_lock) mutex_lock(&gspca_dev->usb_lock); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_BULK_READ, size, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (need_lock) mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), data, size, &act_len, SQ905_DATA_TIMEOUT); /* successful, it returns 0, otherwise negative */ if (ret < 0 || act_len != size) { pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size); return -EIO; } return 0; } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. 
Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. In practice we don't really need this * as the camera doesn't provide any controls. */ static void sq905_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int header_read; /* true if we have already read the frame header. */ int packet_type; int frame_sz; int ret; u8 *data; u8 *buffer; buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage + FRAME_HEADER_LEN; while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* request some data and then read it until we have * a complete frame. */ bytes_left = frame_sz; header_read = 0; /* Note we do not check for gspca_dev->streaming here, as we must finish reading an entire frame, otherwise the next time we stream we start reading in the middle of a frame. */ while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905_MAX_TRANSFER ? SQ905_MAX_TRANSFER : bytes_left; ret = sq905_read_data(gspca_dev, buffer, data_len, 1); if (ret < 0) goto quit_stream; gspca_dbg(gspca_dev, D_PACK, "Got %d bytes out of %d for frame\n", data_len, bytes_left); bytes_left -= data_len; data = buffer; if (!header_read) { packet_type = FIRST_PACKET; /* The first 64 bytes of each frame are * a header full of FF 00 bytes */ data += FRAME_HEADER_LEN; data_len -= FRAME_HEADER_LEN; header_read = 1; } else if (bytes_left == 0) { packet_type = LAST_PACKET; } else { packet_type = INTER_PACKET; } gspca_frame_add(gspca_dev, packet_type, data, data_len); /* If entire frame fits in one packet we still need to add a LAST_PACKET */ if (packet_type == FIRST_PACKET && bytes_left == 0) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } if (gspca_dev->present) { /* acknowledge the frame */ mutex_lock(&gspca_dev->usb_lock); ret = sq905_ack_frame(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) goto quit_stream; } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905_command(gspca_dev, SQ905_CLEAR); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; /* We don't use the buffer gspca allocates so make it small. 
*/ cam->bulk = 1; cam->bulk_size = 64; INIT_WORK(&dev->work_struct, sq905_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u32 ident; int ret; /* connect to the camera and read * the model ID and process that and put it away. */ ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; ret = sq905_command(gspca_dev, SQ905_ID); if (ret < 0) return ret; ret = sq905_read_data(gspca_dev, gspca_dev->usb_buf, 4, 0); if (ret < 0) return ret; /* usb_buf is allocated with kmalloc so is aligned. * Camera model number is the right way round if we assume this * reverse engineered ID is supposed to be big endian. */ ident = be32_to_cpup((__be32 *)gspca_dev->usb_buf); ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; gspca_dbg(gspca_dev, D_CONF, "SQ905 camera ID %08x detected\n", ident); gspca_dev->cam.cam_mode = sq905_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(sq905_mode); if (!(ident & SQ905_HIRES_MASK)) gspca_dev->cam.nmodes--; if (ident & SQ905_ORIENTATION_MASK) gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP; else gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; return 0; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->curr_mode) { default: /* case 2: */ gspca_dbg(gspca_dev, D_STREAM, "Start streaming at high resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_HIGH); break; case 1: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at medium resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_MED); break; case 0: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at low resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_LOW); } if (ret < 0) { gspca_err(gspca_dev, "Start streaming command failed\n"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); if (!dev->work_thread) return -ENOMEM; queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x9120)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
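/*
 * Illustrative sketch, not part of the driver above: how the ID masks decode
 * the reverse-engineered camera ID read in sd_init(). The sample value and
 * example_decode_ident() are hypothetical; SQ905_HIRES_MASK and
 * SQ905_ORIENTATION_MASK and the resulting mode/flip behaviour mirror
 * sd_init() above.
 */
static void example_decode_ident(u32 ident)
{
	bool hires = ident & SQ905_HIRES_MASK;		  /* bit 8 or 9 set: 640x480 capable */
	bool vflip_only = ident & SQ905_ORIENTATION_MASK; /* bit 8 set: image only upside down */

	/* e.g. a made-up ident of 0x09050100 would report hires, vflip only */
	pr_info("sq905 example: id %08x hires=%d flip=%s\n", ident, hires,
		vflip_only ? "vflip" : "vflip+hflip");
}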
// SPDX-License-Identifier: GPL-2.0 #include <linux/irq_work.h> #include <linux/spinlock.h> #include <linux/task_work.h> #include <linux/resume_user_mode.h> static struct callback_head work_exited; /* all we need is ->next == NULL */ #ifdef CONFIG_IRQ_WORK static void task_work_set_notify_irq(struct irq_work *entry) { test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME); } static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) = IRQ_WORK_INIT_HARD(task_work_set_notify_irq); #endif /** * task_work_add - ask the @task to execute @work->func() * @task: the task which should run the callback * @work: the callback to run * @notify: how to notify the targeted task * * Queue @work for task_work_run() below and notify the @task if @notify * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT. * * @TWA_SIGNAL works like signals, in that it will interrupt the targeted * task and run the task_work, regardless of whether the task is currently * running in the kernel or userspace. * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a * reschedule IPI to force the targeted task to reschedule and run task_work. * This can be advantageous if there's no strict requirement that the * task_work be run as soon as possible, just whenever the task enters the * kernel anyway. * @TWA_RESUME work is run only when the task exits the kernel and returns to * user mode, or before entering guest mode. * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the * current @task and if the current context is NMI. * * Fails if the @task is exiting/exited and thus it can't process this @work. * Otherwise @work->func() will be called when the @task goes through one of * the aforementioned transitions, or exits. * * If the targeted task is exiting, then an error is returned and the work item * is not queued. It's up to the caller to arrange for an alternative mechanism * in that case. * * Note: there is no ordering guarantee on works queued here. The task_work * list is LIFO. * * RETURNS: * 0 if it succeeds or -ESRCH.
*/ int task_work_add(struct task_struct *task, struct callback_head *work, enum task_work_notify_mode notify) { struct callback_head *head; if (notify == TWA_NMI_CURRENT) { if (WARN_ON_ONCE(task != current)) return -EINVAL; if (!IS_ENABLED(CONFIG_IRQ_WORK)) return -EINVAL; } else { kasan_record_aux_stack(work); } head = READ_ONCE(task->task_works); do { if (unlikely(head == &work_exited)) return -ESRCH; work->next = head; } while (!try_cmpxchg(&task->task_works, &head, work)); switch (notify) { case TWA_NONE: break; case TWA_RESUME: set_notify_resume(task); break; case TWA_SIGNAL: set_notify_signal(task); break; case TWA_SIGNAL_NO_IPI: __set_notify_signal(task); break; #ifdef CONFIG_IRQ_WORK case TWA_NMI_CURRENT: irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume)); break; #endif default: WARN_ON_ONCE(1); break; } return 0; } /** * task_work_cancel_match - cancel a pending work added by task_work_add() * @task: the task which should execute the work * @match: match function to call * @data: data to be passed in to match function * * RETURNS: * The found work or NULL if not found. */ struct callback_head * task_work_cancel_match(struct task_struct *task, bool (*match)(struct callback_head *, void *data), void *data) { struct callback_head **pprev = &task->task_works; struct callback_head *work; unsigned long flags; if (likely(!task_work_pending(task))) return NULL; /* * If cmpxchg() fails we continue without updating pprev. * Either we raced with task_work_add() which added the * new entry before this work, we will find it again. Or * we raced with task_work_run(), *pprev == NULL/exited. */ raw_spin_lock_irqsave(&task->pi_lock, flags); work = READ_ONCE(*pprev); while (work) { if (!match(work, data)) { pprev = &work->next; work = READ_ONCE(*pprev); } else if (try_cmpxchg(pprev, &work, work->next)) break; } raw_spin_unlock_irqrestore(&task->pi_lock, flags); return work; } static bool task_work_func_match(struct callback_head *cb, void *data) { return cb->func == data; } /** * task_work_cancel_func - cancel a pending work matching a function added by task_work_add() * @task: the task which should execute the func's work * @func: identifies the func to match with a work to remove * * Find the last queued pending work with ->func == @func and remove * it from queue. * * RETURNS: * The found work or NULL if not found. */ struct callback_head * task_work_cancel_func(struct task_struct *task, task_work_func_t func) { return task_work_cancel_match(task, task_work_func_match, func); } static bool task_work_match(struct callback_head *cb, void *data) { return cb == data; } /** * task_work_cancel - cancel a pending work added by task_work_add() * @task: the task which should execute the work * @cb: the callback to remove if queued * * Remove a callback from a task's queue if queued. * * RETURNS: * True if the callback was queued and got cancelled, false otherwise. */ bool task_work_cancel(struct task_struct *task, struct callback_head *cb) { struct callback_head *ret; ret = task_work_cancel_match(task, task_work_match, cb); return ret == cb; } /** * task_work_run - execute the works added by task_work_add() * * Flush the pending works. Should be used by the core kernel code. * Called before the task returns to the user-mode or stops, or when * it exits. In the latter case task_work_add() can no longer add the * new work after task_work_run() returns. 
*/ void task_work_run(void) { struct task_struct *task = current; struct callback_head *work, *head, *next; for (;;) { /* * work->func() can do task_work_add(), do not set * work_exited unless the list is empty. */ work = READ_ONCE(task->task_works); do { head = NULL; if (!work) { if (task->flags & PF_EXITING) head = &work_exited; else break; } } while (!try_cmpxchg(&task->task_works, &work, head)); if (!work) break; /* * Synchronize with task_work_cancel_match(). It can not remove * the first entry == work, cmpxchg(task_works) must fail. * But it can remove another entry from the ->next list. */ raw_spin_lock_irq(&task->pi_lock); raw_spin_unlock_irq(&task->pi_lock); do { next = work->next; work->func(work); work = next; cond_resched(); } while (work); } }
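/*
 * Illustrative sketch, not part of the file above: queueing a callback on the
 * current task with task_work_add(). The example_* names are hypothetical and
 * the allocation assumes <linux/slab.h>; the callback_head embedding and the
 * container_of() pattern follow how task_work_run() invokes work->func(work).
 */
struct example_ctx {
	struct callback_head twork;
	int value;
};

static void example_func(struct callback_head *head)
{
	struct example_ctx *ctx = container_of(head, struct example_ctx, twork);

	/* runs when the task returns to user mode (TWA_RESUME) or exits */
	pr_debug("task_work example: value %d\n", ctx->value);
	kfree(ctx);
}

static int example_queue_work(int value)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->value = value;
	init_task_work(&ctx->twork, example_func);

	/* fails with -ESRCH if current is already exiting */
	if (task_work_add(current, &ctx->twork, TWA_RESUME)) {
		kfree(ctx);
		return -ESRCH;
	}
	return 0;
}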
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Alauda-based card readers * * Current development and maintenance by: * (c) 2005 Daniel Drake <dsd@gentoo.org> * * The 'Alauda' is a chip manufactured by RATOC for OEM use. * * Alauda implements a vendor-specific command set to access two media reader * ports (XD, SmartMedia). This driver converts SCSI commands to the commands * which are accepted by these devices. * * The driver was developed through reverse-engineering, with the help of the * sddr09 driver which has many similarities, and with some help from the * (very old) vendor-supplied GPL sma03 driver.
* * For protocol info, see http://alauda.sourceforge.net */ #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-alauda" MODULE_DESCRIPTION("Driver for Alauda-based card readers"); MODULE_AUTHOR("Daniel Drake <dsd@gentoo.org>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); /* * Status bytes */ #define ALAUDA_STATUS_ERROR 0x01 #define ALAUDA_STATUS_READY 0x40 /* * Control opcodes (for request field) */ #define ALAUDA_GET_XD_MEDIA_STATUS 0x08 #define ALAUDA_GET_SM_MEDIA_STATUS 0x98 #define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a #define ALAUDA_ACK_SM_MEDIA_CHANGE 0x9a #define ALAUDA_GET_XD_MEDIA_SIG 0x86 #define ALAUDA_GET_SM_MEDIA_SIG 0x96 /* * Bulk command identity (byte 0) */ #define ALAUDA_BULK_CMD 0x40 /* * Bulk opcodes (byte 1) */ #define ALAUDA_BULK_GET_REDU_DATA 0x85 #define ALAUDA_BULK_READ_BLOCK 0x94 #define ALAUDA_BULK_ERASE_BLOCK 0xa3 #define ALAUDA_BULK_WRITE_BLOCK 0xb4 #define ALAUDA_BULK_GET_STATUS2 0xb7 #define ALAUDA_BULK_RESET_MEDIA 0xe0 /* * Port to operate on (byte 8) */ #define ALAUDA_PORT_XD 0x00 #define ALAUDA_PORT_SM 0x01 /* * LBA and PBA are unsigned ints. Special values. */ #define UNDEF 0xffff #define SPARE 0xfffe #define UNUSABLE 0xfffd struct alauda_media_info { unsigned long capacity; /* total media size in bytes */ unsigned int pagesize; /* page size in bytes */ unsigned int blocksize; /* number of pages per block */ unsigned int uzonesize; /* number of usable blocks per zone */ unsigned int zonesize; /* number of blocks per zone */ unsigned int blockmask; /* mask to get page from address */ unsigned char pageshift; unsigned char blockshift; unsigned char zoneshift; u16 **lba_to_pba; /* logical to physical block map */ u16 **pba_to_lba; /* physical to logical block map */ }; struct alauda_info { struct alauda_media_info port[2]; int wr_ep; /* endpoint to write data out of */ unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ bool media_initialized; }; #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) ) #define LSB_of(s) ((s)&0xFF) #define MSB_of(s) ((s)>>8) #define MEDIA_PORT(us) us->srb->device->lun #define MEDIA_INFO(us) ((struct alauda_info *)us->extra)->port[MEDIA_PORT(us)] #define PBA_LO(pba) ((pba & 0xF) << 5) #define PBA_HI(pba) (pba >> 3) #define PBA_ZONE(pba) (pba >> 11) static int init_alauda(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id alauda_usb_ids[] = { # include "unusual_alauda.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, alauda_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev alauda_unusual_dev_list[] = { # include "unusual_alauda.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* * Media 
handling */ struct alauda_card_info { unsigned char id; /* id byte */ unsigned char chipshift; /* 1<<cs bytes total capacity */ unsigned char pageshift; /* 1<<ps bytes in a page */ unsigned char blockshift; /* 1<<bs pages per block */ unsigned char zoneshift; /* 1<<zs blocks per zone */ }; static const struct alauda_card_info alauda_card_ids[] = { /* NAND flash */ { 0x6e, 20, 8, 4, 8}, /* 1 MB */ { 0xe8, 20, 8, 4, 8}, /* 1 MB */ { 0xec, 20, 8, 4, 8}, /* 1 MB */ { 0x64, 21, 8, 4, 9}, /* 2 MB */ { 0xea, 21, 8, 4, 9}, /* 2 MB */ { 0x6b, 22, 9, 4, 9}, /* 4 MB */ { 0xe3, 22, 9, 4, 9}, /* 4 MB */ { 0xe5, 22, 9, 4, 9}, /* 4 MB */ { 0xe6, 23, 9, 4, 10}, /* 8 MB */ { 0x73, 24, 9, 5, 10}, /* 16 MB */ { 0x75, 25, 9, 5, 10}, /* 32 MB */ { 0x76, 26, 9, 5, 10}, /* 64 MB */ { 0x79, 27, 9, 5, 10}, /* 128 MB */ { 0x71, 28, 9, 5, 10}, /* 256 MB */ /* MASK ROM */ { 0x5d, 21, 9, 4, 8}, /* 2 MB */ { 0xd5, 22, 9, 4, 9}, /* 4 MB */ { 0xd6, 23, 9, 4, 10}, /* 8 MB */ { 0x57, 24, 9, 4, 11}, /* 16 MB */ { 0x58, 25, 9, 4, 12}, /* 32 MB */ { 0,} }; static const struct alauda_card_info *alauda_card_find_id(unsigned char id) { int i; for (i = 0; alauda_card_ids[i].id != 0; i++) if (alauda_card_ids[i].id == id) return &(alauda_card_ids[i]); return NULL; } /* * ECC computation. */ static unsigned char parity[256]; static unsigned char ecc2[256]; static void nand_init_ecc(void) { int i, j, a; parity[0] = 0; for (i = 1; i < 256; i++) parity[i] = (parity[i&(i-1)] ^ 1); for (i = 0; i < 256; i++) { a = 0; for (j = 0; j < 8; j++) { if (i & (1<<j)) { if ((j & 1) == 0) a ^= 0x04; if ((j & 2) == 0) a ^= 0x10; if ((j & 4) == 0) a ^= 0x40; } } ecc2[i] = ~(a ^ (a<<1) ^ (parity[i] ? 0xa8 : 0)); } } /* compute 3-byte ecc on 256 bytes */ static void nand_compute_ecc(unsigned char *data, unsigned char *ecc) { int i, j, a; unsigned char par = 0, bit, bits[8] = {0}; /* collect 16 checksum bits */ for (i = 0; i < 256; i++) { par ^= data[i]; bit = parity[data[i]]; for (j = 0; j < 8; j++) if ((i & (1<<j)) == 0) bits[j] ^= bit; } /* put 4+4+4 = 12 bits in the ecc */ a = (bits[3] << 6) + (bits[2] << 4) + (bits[1] << 2) + bits[0]; ecc[0] = ~(a ^ (a<<1) ^ (parity[par] ? 0xaa : 0)); a = (bits[7] << 6) + (bits[6] << 4) + (bits[5] << 2) + bits[4]; ecc[1] = ~(a ^ (a<<1) ^ (parity[par] ? 
0xaa : 0)); ecc[2] = ecc2[par]; } static int nand_compare_ecc(unsigned char *data, unsigned char *ecc) { return (data[0] == ecc[0] && data[1] == ecc[1] && data[2] == ecc[2]); } static void nand_store_ecc(unsigned char *data, unsigned char *ecc) { memcpy(data, ecc, 3); } /* * Alauda driver */ /* * Forget our PBA <---> LBA mappings for a particular port */ static void alauda_free_maps (struct alauda_media_info *media_info) { unsigned int shift = media_info->zoneshift + media_info->blockshift + media_info->pageshift; unsigned int num_zones = media_info->capacity >> shift; unsigned int i; if (media_info->lba_to_pba != NULL) for (i = 0; i < num_zones; i++) { kfree(media_info->lba_to_pba[i]); media_info->lba_to_pba[i] = NULL; } if (media_info->pba_to_lba != NULL) for (i = 0; i < num_zones; i++) { kfree(media_info->pba_to_lba[i]); media_info->pba_to_lba[i] = NULL; } } /* * Returns 2 bytes of status data * The first byte describes media status, and second byte describes door status */ static int alauda_get_media_status(struct us_data *us, unsigned char *data) { int rc; unsigned char command; if (MEDIA_PORT(us) == ALAUDA_PORT_XD) command = ALAUDA_GET_XD_MEDIA_STATUS; else command = ALAUDA_GET_SM_MEDIA_STATUS; rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 1, data, 2); if (rc == USB_STOR_XFER_GOOD) usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); return rc; } /* * Clears the "media was changed" bit so that we know when it changes again * in the future. */ static int alauda_ack_media(struct us_data *us) { unsigned char command; if (MEDIA_PORT(us) == ALAUDA_PORT_XD) command = ALAUDA_ACK_XD_MEDIA_CHANGE; else command = ALAUDA_ACK_SM_MEDIA_CHANGE; return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, command, 0x40, 0, 1, NULL, 0); } /* * Retrieves a 4-byte media signature, which indicates manufacturer, capacity, * and some other details. */ static int alauda_get_media_signature(struct us_data *us, unsigned char *data) { unsigned char command; if (MEDIA_PORT(us) == ALAUDA_PORT_XD) command = ALAUDA_GET_XD_MEDIA_SIG; else command = ALAUDA_GET_SM_MEDIA_SIG; return usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 0, data, 4); } /* * Resets the media status (but not the whole device?) */ static int alauda_reset_media(struct us_data *us) { unsigned char *command = us->iobuf; memset(command, 0, 9); command[0] = ALAUDA_BULK_CMD; command[1] = ALAUDA_BULK_RESET_MEDIA; command[8] = MEDIA_PORT(us); return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); } /* * Examines the media and deduces capacity, etc. 
*/ static int alauda_init_media(struct us_data *us) { unsigned char *data = us->iobuf; int ready = 0; const struct alauda_card_info *media_info; unsigned int num_zones; while (ready == 0) { msleep(20); if (alauda_get_media_status(us, data) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (data[0] & 0x10) ready = 1; } usb_stor_dbg(us, "We are ready for action!\n"); if (alauda_ack_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; msleep(10); if (alauda_get_media_status(us, data) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; if (data[0] != 0x14) { usb_stor_dbg(us, "Media not ready after ack\n"); return USB_STOR_TRANSPORT_ERROR; } if (alauda_get_media_signature(us, data) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "Media signature: %4ph\n", data); media_info = alauda_card_find_id(data[1]); if (media_info == NULL) { pr_warn("alauda_init_media: Unrecognised media signature: %4ph\n", data); return USB_STOR_TRANSPORT_ERROR; } MEDIA_INFO(us).capacity = 1 << media_info->chipshift; usb_stor_dbg(us, "Found media with capacity: %ldMB\n", MEDIA_INFO(us).capacity >> 20); MEDIA_INFO(us).pageshift = media_info->pageshift; MEDIA_INFO(us).blockshift = media_info->blockshift; MEDIA_INFO(us).zoneshift = media_info->zoneshift; MEDIA_INFO(us).pagesize = 1 << media_info->pageshift; MEDIA_INFO(us).blocksize = 1 << media_info->blockshift; MEDIA_INFO(us).zonesize = 1 << media_info->zoneshift; MEDIA_INFO(us).uzonesize = ((1 << media_info->zoneshift) / 128) * 125; MEDIA_INFO(us).blockmask = MEDIA_INFO(us).blocksize - 1; num_zones = MEDIA_INFO(us).capacity >> (MEDIA_INFO(us).zoneshift + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO); if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL) return USB_STOR_TRANSPORT_ERROR; if (alauda_reset_media(us) != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; return USB_STOR_TRANSPORT_GOOD; } /* * Examines the media status and does the right thing when the media has gone, * appeared, or changed. 
*/ static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; unsigned char *status = us->iobuf; int rc; rc = alauda_get_media_status(us, status); if (rc != USB_STOR_XFER_GOOD) { status[0] = 0xF0; /* Pretend there's no media */ status[1] = 0; } /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) || ((status[1] & 0x01) == 0)) { usb_stor_dbg(us, "No media, or door open\n"); alauda_free_maps(&MEDIA_INFO(us)); info->sense_key = 0x02; info->sense_asc = 0x3A; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } /* Check for media change */ if (status[0] & 0x08 || !info->media_initialized) { usb_stor_dbg(us, "Media change detected\n"); alauda_free_maps(&MEDIA_INFO(us)); rc = alauda_init_media(us); if (rc == USB_STOR_TRANSPORT_GOOD) info->media_initialized = true; info->sense_key = UNIT_ATTENTION; info->sense_asc = 0x28; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } return USB_STOR_TRANSPORT_GOOD; } /* * Checks the status from the 2nd status register * Returns 3 bytes of status data, only the first is known */ static int alauda_check_status2(struct us_data *us) { int rc; unsigned char command[] = { ALAUDA_BULK_CMD, ALAUDA_BULK_GET_STATUS2, 0, 0, 0, 0, 3, 0, MEDIA_PORT(us) }; unsigned char data[3]; rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; rc = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, 3, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; usb_stor_dbg(us, "%3ph\n", data); if (data[0] & ALAUDA_STATUS_ERROR) return USB_STOR_XFER_ERROR; return USB_STOR_XFER_GOOD; } /* * Gets the redundancy data for the first page of a PBA * Returns 16 bytes. */ static int alauda_get_redu_data(struct us_data *us, u16 pba, unsigned char *data) { int rc; unsigned char command[] = { ALAUDA_BULK_CMD, ALAUDA_BULK_GET_REDU_DATA, PBA_HI(pba), PBA_ZONE(pba), 0, PBA_LO(pba), 0, 0, MEDIA_PORT(us) }; rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, 16, NULL); } /* * Finds the first unused PBA in a zone * Returns the absolute PBA of an unused PBA, or 0 if none found. 
*/ static u16 alauda_find_unused_pba(struct alauda_media_info *info, unsigned int zone) { u16 *pba_to_lba = info->pba_to_lba[zone]; unsigned int i; for (i = 0; i < info->zonesize; i++) if (pba_to_lba[i] == UNDEF) return (zone << info->zoneshift) + i; return 0; } /* * Reads the redundancy data for all PBA's in a zone * Produces lba <--> pba mappings */ static int alauda_read_map(struct us_data *us, unsigned int zone) { unsigned char *data = us->iobuf; int result; int i, j; unsigned int zonesize = MEDIA_INFO(us).zonesize; unsigned int uzonesize = MEDIA_INFO(us).uzonesize; unsigned int lba_offset, lba_real, blocknum; unsigned int zone_base_lba = zone * uzonesize; unsigned int zone_base_pba = zone * zonesize; u16 *lba_to_pba = kcalloc(zonesize, sizeof(u16), GFP_NOIO); u16 *pba_to_lba = kcalloc(zonesize, sizeof(u16), GFP_NOIO); if (lba_to_pba == NULL || pba_to_lba == NULL) { result = USB_STOR_TRANSPORT_ERROR; goto error; } usb_stor_dbg(us, "Mapping blocks for zone %d\n", zone); /* 1024 PBA's per zone */ for (i = 0; i < zonesize; i++) lba_to_pba[i] = pba_to_lba[i] = UNDEF; for (i = 0; i < zonesize; i++) { blocknum = zone_base_pba + i; result = alauda_get_redu_data(us, blocknum, data); if (result != USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; goto error; } /* special PBAs have control field 0^16 */ for (j = 0; j < 16; j++) if (data[j] != 0) goto nonz; pba_to_lba[i] = UNUSABLE; usb_stor_dbg(us, "PBA %d has no logical mapping\n", blocknum); continue; nonz: /* unwritten PBAs have control field FF^16 */ for (j = 0; j < 16; j++) if (data[j] != 0xff) goto nonff; continue; nonff: /* normal PBAs start with six FFs */ if (j < 6) { usb_stor_dbg(us, "PBA %d has no logical mapping: reserved area = %02X%02X%02X%02X data status %02X block status %02X\n", blocknum, data[0], data[1], data[2], data[3], data[4], data[5]); pba_to_lba[i] = UNUSABLE; continue; } if ((data[6] >> 4) != 0x01) { usb_stor_dbg(us, "PBA %d has invalid address field %02X%02X/%02X%02X\n", blocknum, data[6], data[7], data[11], data[12]); pba_to_lba[i] = UNUSABLE; continue; } /* check even parity */ if (parity[data[6] ^ data[7]]) { printk(KERN_WARNING "alauda_read_map: Bad parity in LBA for block %d" " (%02X %02X)\n", i, data[6], data[7]); pba_to_lba[i] = UNUSABLE; continue; } lba_offset = short_pack(data[7], data[6]); lba_offset = (lba_offset & 0x07FF) >> 1; lba_real = lba_offset + zone_base_lba; /* * Every 1024 physical blocks ("zone"), the LBA numbers * go back to zero, but are within a higher block of LBA's. * Also, there is a maximum of 1000 LBA's per zone. * In other words, in PBA 1024-2047 you will find LBA 0-999 * which are really LBA 1000-1999. This allows for 24 bad * or special physical blocks per zone. 
*/ if (lba_offset >= uzonesize) { printk(KERN_WARNING "alauda_read_map: Bad low LBA %d for block %d\n", lba_real, blocknum); continue; } if (lba_to_pba[lba_offset] != UNDEF) { printk(KERN_WARNING "alauda_read_map: " "LBA %d seen for PBA %d and %d\n", lba_real, lba_to_pba[lba_offset], blocknum); continue; } pba_to_lba[i] = lba_real; lba_to_pba[lba_offset] = blocknum; continue; } MEDIA_INFO(us).lba_to_pba[zone] = lba_to_pba; MEDIA_INFO(us).pba_to_lba[zone] = pba_to_lba; result = 0; goto out; error: kfree(lba_to_pba); kfree(pba_to_lba); out: return result; } /* * Checks to see whether we have already mapped a certain zone * If we haven't, the map is generated */ static void alauda_ensure_map_for_zone(struct us_data *us, unsigned int zone) { if (MEDIA_INFO(us).lba_to_pba[zone] == NULL || MEDIA_INFO(us).pba_to_lba[zone] == NULL) alauda_read_map(us, zone); } /* * Erases an entire block */ static int alauda_erase_block(struct us_data *us, u16 pba) { int rc; unsigned char command[] = { ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba), PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, MEDIA_PORT(us) }; unsigned char buf[2]; usb_stor_dbg(us, "Erasing PBA %d\n", pba); rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; rc = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, buf, 2, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; usb_stor_dbg(us, "Erase result: %02X %02X\n", buf[0], buf[1]); return rc; } /* * Reads data from a certain offset page inside a PBA, including interleaved * redundancy data. Returns (pagesize+64)*pages bytes in data. */ static int alauda_read_block_raw(struct us_data *us, u16 pba, unsigned int page, unsigned int pages, unsigned char *data) { int rc; unsigned char command[] = { ALAUDA_BULK_CMD, ALAUDA_BULK_READ_BLOCK, PBA_HI(pba), PBA_ZONE(pba), 0, PBA_LO(pba) + page, pages, 0, MEDIA_PORT(us) }; usb_stor_dbg(us, "pba %d page %d count %d\n", pba, page, pages); rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data, (MEDIA_INFO(us).pagesize + 64) * pages, NULL); } /* * Reads data from a certain offset page inside a PBA, excluding redundancy * data. Returns pagesize*pages bytes in data. Note that data must be big enough * to hold (pagesize+64)*pages bytes of data, but you can ignore those 'extra' * trailing bytes outside this function. */ static int alauda_read_block(struct us_data *us, u16 pba, unsigned int page, unsigned int pages, unsigned char *data) { int i, rc; unsigned int pagesize = MEDIA_INFO(us).pagesize; rc = alauda_read_block_raw(us, pba, page, pages, data); if (rc != USB_STOR_XFER_GOOD) return rc; /* Cut out the redundancy data */ for (i = 0; i < pages; i++) { int dest_offset = i * pagesize; int src_offset = i * (pagesize + 64); memmove(data + dest_offset, data + src_offset, pagesize); } return rc; } /* * Writes an entire block of data and checks status after write. * Redundancy data must be already included in data. Data should be * (pagesize+64)*blocksize bytes in length. 
*/ static int alauda_write_block(struct us_data *us, u16 pba, unsigned char *data) { int rc; struct alauda_info *info = (struct alauda_info *) us->extra; unsigned char command[] = { ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_BLOCK, PBA_HI(pba), PBA_ZONE(pba), 0, PBA_LO(pba), 32, 0, MEDIA_PORT(us) }; usb_stor_dbg(us, "pba %d\n", pba); rc = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, command, 9, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; rc = usb_stor_bulk_transfer_buf(us, info->wr_ep, data, (MEDIA_INFO(us).pagesize + 64) * MEDIA_INFO(us).blocksize, NULL); if (rc != USB_STOR_XFER_GOOD) return rc; return alauda_check_status2(us); } /* * Write some data to a specific LBA. */ static int alauda_write_lba(struct us_data *us, u16 lba, unsigned int page, unsigned int pages, unsigned char *ptr, unsigned char *blockbuffer) { u16 pba, lbap, new_pba; unsigned char *bptr, *cptr, *xptr; unsigned char ecc[3]; int i, result; unsigned int uzonesize = MEDIA_INFO(us).uzonesize; unsigned int zonesize = MEDIA_INFO(us).zonesize; unsigned int pagesize = MEDIA_INFO(us).pagesize; unsigned int blocksize = MEDIA_INFO(us).blocksize; unsigned int lba_offset = lba % uzonesize; unsigned int new_pba_offset; unsigned int zone = lba / uzonesize; alauda_ensure_map_for_zone(us, zone); pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset]; if (pba == 1) { /* * Maybe it is impossible to write to PBA 1. * Fake success, but don't do anything. */ printk(KERN_WARNING "alauda_write_lba: avoid writing to pba 1\n"); return USB_STOR_TRANSPORT_GOOD; } new_pba = alauda_find_unused_pba(&MEDIA_INFO(us), zone); if (!new_pba) { printk(KERN_WARNING "alauda_write_lba: Out of unused blocks\n"); return USB_STOR_TRANSPORT_ERROR; } /* read old contents */ if (pba != UNDEF) { result = alauda_read_block_raw(us, pba, 0, blocksize, blockbuffer); if (result != USB_STOR_XFER_GOOD) return result; } else { memset(blockbuffer, 0, blocksize * (pagesize + 64)); } lbap = (lba_offset << 1) | 0x1000; if (parity[MSB_of(lbap) ^ LSB_of(lbap)]) lbap ^= 1; /* check old contents and fill lba */ for (i = 0; i < blocksize; i++) { bptr = blockbuffer + (i * (pagesize + 64)); cptr = bptr + pagesize; nand_compute_ecc(bptr, ecc); if (!nand_compare_ecc(cptr+13, ecc)) { usb_stor_dbg(us, "Warning: bad ecc in page %d- of pba %d\n", i, pba); nand_store_ecc(cptr+13, ecc); } nand_compute_ecc(bptr + (pagesize / 2), ecc); if (!nand_compare_ecc(cptr+8, ecc)) { usb_stor_dbg(us, "Warning: bad ecc in page %d+ of pba %d\n", i, pba); nand_store_ecc(cptr+8, ecc); } cptr[6] = cptr[11] = MSB_of(lbap); cptr[7] = cptr[12] = LSB_of(lbap); } /* copy in new stuff and compute ECC */ xptr = ptr; for (i = page; i < page+pages; i++) { bptr = blockbuffer + (i * (pagesize + 64)); cptr = bptr + pagesize; memcpy(bptr, xptr, pagesize); xptr += pagesize; nand_compute_ecc(bptr, ecc); nand_store_ecc(cptr+13, ecc); nand_compute_ecc(bptr + (pagesize / 2), ecc); nand_store_ecc(cptr+8, ecc); } result = alauda_write_block(us, new_pba, blockbuffer); if (result != USB_STOR_XFER_GOOD) return result; new_pba_offset = new_pba - (zone * zonesize); MEDIA_INFO(us).pba_to_lba[zone][new_pba_offset] = lba; MEDIA_INFO(us).lba_to_pba[zone][lba_offset] = new_pba; usb_stor_dbg(us, "Remapped LBA %d to PBA %d\n", lba, new_pba); if (pba != UNDEF) { unsigned int pba_offset = pba - (zone * zonesize); result = alauda_erase_block(us, pba); if (result != USB_STOR_XFER_GOOD) return result; MEDIA_INFO(us).pba_to_lba[zone][pba_offset] = UNDEF; } return USB_STOR_TRANSPORT_GOOD; } /* * Read data from a specific sector address */ static int 
alauda_read_data(struct us_data *us, unsigned long address, unsigned int sectors) { unsigned char *buffer; u16 lba, max_lba; unsigned int page, len, offset; unsigned int blockshift = MEDIA_INFO(us).blockshift; unsigned int pageshift = MEDIA_INFO(us).pageshift; unsigned int blocksize = MEDIA_INFO(us).blocksize; unsigned int pagesize = MEDIA_INFO(us).pagesize; unsigned int uzonesize = MEDIA_INFO(us).uzonesize; struct scatterlist *sg; int result; /* * Since we only read in one block at a time, we have to create * a bounce buffer and move the data a piece at a time between the * bounce buffer and the actual transfer buffer. * We make this buffer big enough to hold temporary redundancy data, * which we use when reading the data blocks. */ len = min(sectors, blocksize) * (pagesize + 64); buffer = kmalloc(len, GFP_NOIO); if (!buffer) return USB_STOR_TRANSPORT_ERROR; /* Figure out the initial LBA and page */ lba = address >> blockshift; page = (address & MEDIA_INFO(us).blockmask); max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift); result = USB_STOR_TRANSPORT_GOOD; offset = 0; sg = NULL; while (sectors > 0) { unsigned int zone = lba / uzonesize; /* integer division */ unsigned int lba_offset = lba - (zone * uzonesize); unsigned int pages; u16 pba; alauda_ensure_map_for_zone(us, zone); /* Not overflowing capacity? */ if (lba >= max_lba) { usb_stor_dbg(us, "Error: Requested lba %u exceeds maximum %u\n", lba, max_lba); result = USB_STOR_TRANSPORT_ERROR; break; } /* Find number of pages we can read in this block */ pages = min(sectors, blocksize - page); len = pages << pageshift; /* Find where this lba lives on disk */ pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset]; if (pba == UNDEF) { /* this lba was never written */ usb_stor_dbg(us, "Read %d zero pages (LBA %d) page %d\n", pages, lba, page); /* * This is not really an error. It just means * that the block has never been written. * Instead of returning USB_STOR_TRANSPORT_ERROR * it is better to return all zero data. */ memset(buffer, 0, len); } else { usb_stor_dbg(us, "Read %d pages, from PBA %d (LBA %d) page %d\n", pages, pba, lba, page); result = alauda_read_block(us, pba, page, pages, buffer); if (result != USB_STOR_TRANSPORT_GOOD) break; } /* Store the data in the transfer buffer */ usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &offset, TO_XFER_BUF); page = 0; lba++; sectors -= pages; } kfree(buffer); return result; } /* * Write data to a specific sector address */ static int alauda_write_data(struct us_data *us, unsigned long address, unsigned int sectors) { unsigned char *buffer, *blockbuffer; unsigned int page, len, offset; unsigned int blockshift = MEDIA_INFO(us).blockshift; unsigned int pageshift = MEDIA_INFO(us).pageshift; unsigned int blocksize = MEDIA_INFO(us).blocksize; unsigned int pagesize = MEDIA_INFO(us).pagesize; struct scatterlist *sg; u16 lba, max_lba; int result; /* * Since we don't write the user data directly to the device, * we have to create a bounce buffer and move the data a piece * at a time between the bounce buffer and the actual transfer buffer. 
*/ len = min(sectors, blocksize) * pagesize; buffer = kmalloc(len, GFP_NOIO); if (!buffer) return USB_STOR_TRANSPORT_ERROR; /* * We also need a temporary block buffer, where we read in the old data, * overwrite parts with the new data, and manipulate the redundancy data */ blockbuffer = kmalloc_array(pagesize + 64, blocksize, GFP_NOIO); if (!blockbuffer) { kfree(buffer); return USB_STOR_TRANSPORT_ERROR; } /* Figure out the initial LBA and page */ lba = address >> blockshift; page = (address & MEDIA_INFO(us).blockmask); max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift); result = USB_STOR_TRANSPORT_GOOD; offset = 0; sg = NULL; while (sectors > 0) { /* Write as many sectors as possible in this block */ unsigned int pages = min(sectors, blocksize - page); len = pages << pageshift; /* Not overflowing capacity? */ if (lba >= max_lba) { usb_stor_dbg(us, "Requested lba %u exceeds maximum %u\n", lba, max_lba); result = USB_STOR_TRANSPORT_ERROR; break; } /* Get the data from the transfer buffer */ usb_stor_access_xfer_buf(buffer, len, us->srb, &sg, &offset, FROM_XFER_BUF); result = alauda_write_lba(us, lba, page, pages, buffer, blockbuffer); if (result != USB_STOR_TRANSPORT_GOOD) break; page = 0; lba++; sectors -= pages; } kfree(buffer); kfree(blockbuffer); return result; } /* * Our interface with the rest of the world */ static void alauda_info_destructor(void *extra) { struct alauda_info *info = (struct alauda_info *) extra; int port; if (!info) return; for (port = 0; port < 2; port++) { struct alauda_media_info *media_info = &info->port[port]; alauda_free_maps(media_info); kfree(media_info->lba_to_pba); kfree(media_info->pba_to_lba); } } /* * Initialize alauda_info struct and find the data-write endpoint */ static int init_alauda(struct us_data *us) { struct alauda_info *info; struct usb_host_interface *altsetting = us->pusb_intf->cur_altsetting; nand_init_ecc(); us->extra = kzalloc(sizeof(struct alauda_info), GFP_NOIO); if (!us->extra) return -ENOMEM; info = (struct alauda_info *) us->extra; us->extra_destructor = alauda_info_destructor; info->wr_ep = usb_sndbulkpipe(us->pusb_dev, altsetting->endpoint[0].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); return 0; } static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us) { int rc; struct alauda_info *info = (struct alauda_info *) us->extra; unsigned char *ptr = us->iobuf; static const unsigned char inquiry_response[36] = { 0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00 }; if (srb->cmnd[0] == INQUIRY) { usb_stor_dbg(us, "INQUIRY - Returning bogus response\n"); memcpy(ptr, inquiry_response, sizeof(inquiry_response)); fill_inquiry_response(us, ptr, 36); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == TEST_UNIT_READY) { usb_stor_dbg(us, "TEST_UNIT_READY\n"); return alauda_check_media(us); } if (srb->cmnd[0] == READ_CAPACITY) { unsigned int num_zones; unsigned long capacity; rc = alauda_check_media(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; num_zones = MEDIA_INFO(us).capacity >> (MEDIA_INFO(us).zoneshift + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift); capacity = num_zones * MEDIA_INFO(us).uzonesize * MEDIA_INFO(us).blocksize; /* Report capacity and page size */ ((__be32 *) ptr)[0] = cpu_to_be32(capacity - 1); ((__be32 *) ptr)[1] = cpu_to_be32(512); usb_stor_set_xfer_buf(ptr, 8, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == READ_10) { unsigned int page, pages; rc = alauda_check_media(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; page = short_pack(srb->cmnd[3], srb->cmnd[2]); page <<= 
16; page |= short_pack(srb->cmnd[5], srb->cmnd[4]); pages = short_pack(srb->cmnd[8], srb->cmnd[7]); usb_stor_dbg(us, "READ_10: page %d pagect %d\n", page, pages); return alauda_read_data(us, page, pages); } if (srb->cmnd[0] == WRITE_10) { unsigned int page, pages; rc = alauda_check_media(us); if (rc != USB_STOR_TRANSPORT_GOOD) return rc; page = short_pack(srb->cmnd[3], srb->cmnd[2]); page <<= 16; page |= short_pack(srb->cmnd[5], srb->cmnd[4]); pages = short_pack(srb->cmnd[8], srb->cmnd[7]); usb_stor_dbg(us, "WRITE_10: page %d pagect %d\n", page, pages); return alauda_write_data(us, page, pages); } if (srb->cmnd[0] == REQUEST_SENSE) { usb_stor_dbg(us, "REQUEST_SENSE\n"); memset(ptr, 0, 18); ptr[0] = 0xF0; ptr[2] = info->sense_key; ptr[7] = 11; ptr[12] = info->sense_asc; ptr[13] = info->sense_ascq; usb_stor_set_xfer_buf(ptr, 18, srb); return USB_STOR_TRANSPORT_GOOD; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { /* * sure. whatever. not like we can stop the user from popping * the media out of the device (no locking doors, etc) */ return USB_STOR_TRANSPORT_GOOD; } usb_stor_dbg(us, "Gah! Unknown command: %d (0x%x)\n", srb->cmnd[0], srb->cmnd[0]); info->sense_key = 0x05; info->sense_asc = 0x20; info->sense_ascq = 0x00; return USB_STOR_TRANSPORT_FAILED; } static struct scsi_host_template alauda_host_template; static int alauda_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - alauda_usb_ids) + alauda_unusual_dev_list, &alauda_host_template); if (result) return result; us->transport_name = "Alauda Control/Bulk"; us->transport = alauda_transport; us->transport_reset = usb_stor_Bulk_reset; us->max_lun = 1; result = usb_stor_probe2(us); return result; } static struct usb_driver alauda_driver = { .name = DRV_NAME, .probe = alauda_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = alauda_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(alauda_driver, alauda_host_template, DRV_NAME); |
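/*
 * Illustrative sketch (not part of the driver above): the zone-based
 * LBA <-> PBA scheme that alauda_read_map() and alauda_write_lba()
 * implement.  Every zone holds 1024 physical blocks but only 1000
 * logical blocks (uzonesize = zonesize / 128 * 125), leaving room for
 * bad or special blocks, and the LBA word written to the spare area
 * keeps its lowest bit free so it can be toggled to give the word even
 * parity.  Constants and helper names here are hypothetical; this
 * builds as a plain userspace program, not kernel code.
 */
#include <stdio.h>

/* 1 if v has an odd number of set bits (same role as the driver's parity[] table) */
static int odd_parity(unsigned int v)
{
	int p = 0;

	for (; v; v &= v - 1)
		p ^= 1;
	return p;
}

int main(void)
{
	unsigned int zonesize = 1024;                    /* physical blocks per zone */
	unsigned int uzonesize = zonesize / 128 * 125;   /* usable blocks per zone: 1000 */
	unsigned int lba = 1234;

	unsigned int zone = lba / uzonesize;             /* zone 1 */
	unsigned int lba_offset = lba % uzonesize;       /* offset 234 within the zone */

	/* Pack the spare-area LBA word the way alauda_write_lba() does */
	unsigned int lbap = (lba_offset << 1) | 0x1000;
	if (odd_parity(((lbap >> 8) & 0xff) ^ (lbap & 0xff)))
		lbap ^= 1;                               /* force even parity over both bytes */

	/* Recover the offset the way alauda_read_map() does */
	unsigned int recovered = (lbap & 0x07FF) >> 1;

	printf("LBA %u -> zone %u, offset %u, spare word 0x%04x, recovered offset %u\n",
	       lba, zone, lba_offset, lbap, recovered);
	return 0;
}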
/*
SPDX-License-Identifier: GPL-2.0 */ #ifndef _SCSI_SCSI_HOST_H #define _SCSI_SCSI_HOST_H #include <linux/device.h> #include <linux/list.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/blk-mq.h> #include <scsi/scsi.h> struct block_device; struct completion; struct module; struct scsi_cmnd; struct scsi_device; struct scsi_target; struct Scsi_Host; struct scsi_transport_template; #define SG_ALL SG_CHUNK_SIZE #define MODE_UNKNOWN 0x00 #define MODE_INITIATOR 0x01 #define MODE_TARGET 0x02 /** * enum scsi_timeout_action - How to handle a command that timed out. * @SCSI_EH_DONE: The command has already been completed. * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion. * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command. */ enum scsi_timeout_action { SCSI_EH_DONE, SCSI_EH_RESET_TIMER, SCSI_EH_NOT_HANDLED, }; struct scsi_host_template { /* * Put fields referenced in IO submission path together in * same cacheline */ /* * Additional per-command data allocated for the driver. */ unsigned int cmd_size; /* * The queuecommand function is used to queue up a scsi * command block to the LLDD. When the driver finished * processing the command the done callback is invoked. * * If queuecommand returns 0, then the driver has accepted the * command. It must also push it to the HBA if the scsi_cmnd * flag SCMD_LAST is set, or if the driver does not implement * commit_rqs. The done() function must be called on the command * when the driver has finished with it. (you may call done on the * command before queuecommand returns, but in this case you * *must* return 0 from queuecommand). * * Queuecommand may also reject the command, in which case it may * not touch the command and must not call done() for it. * * There are two possible rejection returns: * * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but * allow commands to other devices serviced by this host. * * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this * host temporarily. * * For compatibility, any other non-zero return is treated the * same as SCSI_MLQUEUE_HOST_BUSY. * * NOTE: "temporarily" means either until the next command for# * this device/host completes, or a period of time determined by * I/O pressure in the system if there are no other outstanding * commands. * * STATUS: REQUIRED */ int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); /* * The commit_rqs function is used to trigger a hardware * doorbell after some requests have been queued with * queuecommand, when an error is encountered before sending * the request with SCMD_LAST set. * * STATUS: OPTIONAL */ void (*commit_rqs)(struct Scsi_Host *, u16); struct module *module; const char *name; /* * The info function will return whatever useful information the * developer sees fit. If not provided, then the name field will * be used instead. * * Status: OPTIONAL */ const char *(*info)(struct Scsi_Host *); /* * Ioctl interface * * Status: OPTIONAL */ int (*ioctl)(struct scsi_device *dev, unsigned int cmd, void __user *arg); #ifdef CONFIG_COMPAT /* * Compat handler. Handle 32bit ABI. * When unknown ioctl is passed return -ENOIOCTLCMD. * * Status: OPTIONAL */ int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd, void __user *arg); #endif int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); /* * This is an error handling strategy routine. 
You don't need to * define one of these if you don't want to - there is a default * routine that is present that should work in most cases. For those * driver authors that have the inclination and ability to write their * own strategy routine, this is where it is specified. Note - the * strategy routine is *ALWAYS* run in the context of the kernel eh * thread. Thus you are guaranteed to *NOT* be in an interrupt * handler when you execute this, and you are also guaranteed to * *NOT* have any other commands being queued while you are in the * strategy routine. When you return from this function, operations * return to normal. * * See scsi_error.c scsi_unjam_host for additional comments about * what this function should and should not be attempting to do. * * Status: REQUIRED (at least one of them) */ int (* eh_abort_handler)(struct scsi_cmnd *); int (* eh_device_reset_handler)(struct scsi_cmnd *); int (* eh_target_reset_handler)(struct scsi_cmnd *); int (* eh_bus_reset_handler)(struct scsi_cmnd *); int (* eh_host_reset_handler)(struct scsi_cmnd *); /* * Before the mid layer attempts to scan for a new device where none * currently exists, it will call this entry in your driver. Should * your driver need to allocate any structs or perform any other init * items in order to send commands to a currently unused target/lun * combo, then this is where you can perform those allocations. This * is specifically so that drivers won't have to perform any kind of * "is this a new device" checks in their queuecommand routine, * thereby making the hot path a bit quicker. * * Return values: 0 on success, non-0 on failure * * Deallocation: If we didn't find any devices at this ID, you will * get an immediate call to sdev_destroy(). If we find something * here then you will get a call to sdev_configure(), then the * device will be used for however long it is kept around, then when * the device is removed from the system (or * possibly at reboot * time), you will then get a call to sdev_destroy(). This is * assuming you implement sdev_configure and sdev_destroy. * However, if you allocate memory and hang it off the device struct, * then you must implement the sdev_destroy() routine at a minimum * in order to avoid leaking memory * each time a device is tore down. * * Status: OPTIONAL */ int (* sdev_init)(struct scsi_device *); /* * Once the device has responded to an INQUIRY and we know the * device is online, we call into the low level driver with the * struct scsi_device *. If the low level device driver implements * this function, it *must* perform the task of setting the queue * depth on the device. All other tasks are optional and depend * on what the driver supports and various implementation details. * * Things currently recommended to be handled at this time include: * * 1. Setting the device queue depth. Proper setting of this is * described in the comments for scsi_change_queue_depth. * 2. Determining if the device supports the various synchronous * negotiation protocols. The device struct will already have * responded to INQUIRY and the results of the standard items * will have been shoved into the various device flag bits, eg. * device->sdtr will be true if the device supports SDTR messages. * 3. Allocating command structs that the device will need. * 4. Setting the default timeout on this device (if needed). * 5. Anything else the low level driver might want to do on a device * specific setup basis... * 6. Return 0 on success, non-0 on error. 
The device will be marked * as offline on error so that no access will occur. If you return * non-0, your sdev_destroy routine will never get called for this * device, so don't leave any loose memory hanging around, clean * up after yourself before returning non-0 * * Status: OPTIONAL */ int (* sdev_configure)(struct scsi_device *, struct queue_limits *lim); /* * Immediately prior to deallocating the device and after all activity * has ceased the mid layer calls this point so that the low level * driver may completely detach itself from the scsi device and vice * versa. The low level driver is responsible for freeing any memory * it allocated in the sdev_init or sdev_configure calls. * * Status: OPTIONAL */ void (* sdev_destroy)(struct scsi_device *); /* * Before the mid layer attempts to scan for a new device attached * to a target where no target currently exists, it will call this * entry in your driver. Should your driver need to allocate any * structs or perform any other init items in order to send commands * to a currently unused target, then this is where you can perform * those allocations. * * Return values: 0 on success, non-0 on failure * * Status: OPTIONAL */ int (* target_alloc)(struct scsi_target *); /* * Immediately prior to deallocating the target structure, and * after all activity to attached scsi devices has ceased, the * midlayer calls this point so that the driver may deallocate * and terminate any references to the target. * * Note: This callback is called with the host lock held and hence * must not sleep. * * Status: OPTIONAL */ void (* target_destroy)(struct scsi_target *); /* * If a host has the ability to discover targets on its own instead * of scanning the entire bus, it can fill in this function and * call scsi_scan_host(). This function will be called periodically * until it returns 1 with the scsi_host and the elapsed time of * the scan in jiffies. * * Status: OPTIONAL */ int (* scan_finished)(struct Scsi_Host *, unsigned long); /* * If the host wants to be called before the scan starts, but * after the midlayer has set up ready for the scan, it can fill * in this function. * * Status: OPTIONAL */ void (* scan_start)(struct Scsi_Host *); /* * Fill in this function to allow the queue depth of this host * to be changeable (on a per device basis). Returns either * the current queue depth setting (may be different from what * was passed in) or an error. An error should only be * returned if the requested depth is legal but the driver was * unable to set it. If the requested depth is illegal, the * driver should set and return the closest legal queue depth. * * Status: OPTIONAL */ int (* change_queue_depth)(struct scsi_device *, int); /* * This functions lets the driver expose the queue mapping * to the block layer. * * Status: OPTIONAL */ void (* map_queues)(struct Scsi_Host *shost); /* * SCSI interface of blk_poll - poll for IO completions. * Only applicable if SCSI LLD exposes multiple h/w queues. * * Return value: Number of completed entries found. * * Status: OPTIONAL */ int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num); /* * Check if scatterlists need to be padded for DMA draining. * * Status: OPTIONAL */ bool (* dma_need_drain)(struct request *rq); /* * This function determines the BIOS parameters for a given * harddisk. These tend to be numbers that are made up by * the host adapter. 
Parameters: * size, device, list (heads, sectors, cylinders) * * Status: OPTIONAL */ int (* bios_param)(struct scsi_device *, struct block_device *, sector_t, int []); /* * This function is called when one or more partitions on the * device reach beyond the end of the device. * * Status: OPTIONAL */ void (*unlock_native_capacity)(struct scsi_device *); /* * Can be used to export driver statistics and other infos to the * world outside the kernel ie. userspace and it also provides an * interface to feed the driver with information. * * Status: OBSOLETE */ int (*show_info)(struct seq_file *, struct Scsi_Host *); int (*write_info)(struct Scsi_Host *, char *, int); /* * This is an optional routine that allows the transport to become * involved when a scsi io timer fires. The return value tells the * timer routine how to finish the io timeout handling. * * Status: OPTIONAL */ enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); /* * Optional routine that allows the transport to decide if a cmd * is retryable. Return true if the transport is in a state the * cmd should be retried on. */ bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd); /* This is an optional routine that allows transport to initiate * LLD adapter or firmware reset using sysfs attribute. * * Return values: 0 on success, -ve value on failure. * * Status: OPTIONAL */ int (*host_reset)(struct Scsi_Host *shost, int reset_type); #define SCSI_ADAPTER_RESET 1 #define SCSI_FIRMWARE_RESET 2 /* * Name of proc directory */ const char *proc_name; /* * This determines if we will use a non-interrupt driven * or an interrupt driven scheme. It is set to the maximum number * of simultaneous commands a single hw queue in HBA will accept. */ int can_queue; /* * In many instances, especially where disconnect / reconnect are * supported, our host also has an ID on the SCSI bus. If this is * the case, then it must be reserved. Please set this_id to -1 if * your setup is in single initiator mode, and the host lacks an * ID. */ int this_id; /* * This determines the degree to which the host adapter is capable * of scatter-gather. */ unsigned short sg_tablesize; unsigned short sg_prot_tablesize; /* * Set this if the host adapter has limitations beside segment count. */ unsigned int max_sectors; /* * Maximum size in bytes of a single segment. */ unsigned int max_segment_size; unsigned int dma_alignment; /* * DMA scatter gather segment boundary limit. A segment crossing this * boundary will be split in two. */ unsigned long dma_boundary; unsigned long virt_boundary_mask; /* * This specifies "machine infinity" for host templates which don't * limit the transfer size. Note this limit represents an absolute * maximum, and may be over the transfer limits allowed for * individual devices (e.g. 256 for SCSI-1). */ #define SCSI_DEFAULT_MAX_SECTORS 1024 /* * True if this host adapter can make good use of linked commands. * This will allow more than one command to be queued to a given * unit on a given host. Set this to the maximum number of command * blocks to be provided for each device. Set this to 1 for one * command block per lun, 2 for two, etc. Do not set this to 0. * You should make sure that the host adapter will do the right thing * before you try setting this above 1. */ short cmd_per_lun; /* * Allocate tags starting from last allocated tag. */ bool tag_alloc_policy_rr : 1; /* * Track QUEUE_FULL events and reduce queue depth on demand. */ unsigned track_queue_depth:1; /* * This specifies the mode that a LLD supports. 
*/ unsigned supported_mode:2; /* * True for emulated SCSI host adapters (e.g. ATAPI). */ unsigned emulated:1; /* * True if the low-level driver performs its own reset-settle delays. */ unsigned skip_settle_delay:1; /* True if the controller does not support WRITE SAME */ unsigned no_write_same:1; /* True if the host uses host-wide tagspace */ unsigned host_tagset:1; /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */ unsigned queuecommand_may_block:1; /* * Countdown for host blocking with no commands outstanding. */ unsigned int max_host_blocked; /* * Default value for the blocking. If the queue is empty, * host_blocked counts down in the request_fn until it restarts * host operations as zero is reached. * * FIXME: This should probably be a value in the template */ #define SCSI_DEFAULT_HOST_BLOCKED 7 /* * Pointer to the SCSI host sysfs attribute groups, NULL terminated. */ const struct attribute_group **shost_groups; /* * Pointer to the SCSI device attribute groups for this host, * NULL terminated. */ const struct attribute_group **sdev_groups; /* * Vendor Identifier associated with the host * * Note: When specifying vendor_id, be sure to read the * Vendor Type and ID formatting requirements specified in * scsi_netlink.h */ u64 vendor_id; }; /* * Temporary #define for host lock push down. Can be removed when all * drivers have been updated to take advantage of unlocked * queuecommand. * */ #define DEF_SCSI_QCMD(func_name) \ int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \ { \ unsigned long irq_flags; \ int rc; \ spin_lock_irqsave(shost->host_lock, irq_flags); \ rc = func_name##_lck(cmd); \ spin_unlock_irqrestore(shost->host_lock, irq_flags); \ return rc; \ } /* * shost state: If you alter this, you also need to alter scsi_sysfs.c * (for the ascii descriptions) and the state model enforcer: * scsi_host_set_state() */ enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING, SHOST_CANCEL, SHOST_DEL, SHOST_RECOVERY, SHOST_CANCEL_RECOVERY, SHOST_DEL_RECOVERY, }; struct Scsi_Host { /* * __devices is protected by the host_lock, but you should * usually use scsi_device_lookup / shost_for_each_device * to access it and don't care about locking yourself. * In the rare case of being in irq context you can use * their __ prefixed variants with the lock held. NEVER * access this list directly from a driver. */ struct list_head __devices; struct list_head __targets; struct list_head starved_list; spinlock_t default_lock; spinlock_t *host_lock; struct mutex scan_mutex;/* serialize scanning activity */ struct list_head eh_abort_list; struct list_head eh_cmd_q; struct task_struct * ehandler; /* Error recovery thread. */ struct completion * eh_action; /* Wait for specific actions on the host. */ wait_queue_head_t host_wait; const struct scsi_host_template *hostt; struct scsi_transport_template *transportt; struct kref tagset_refcnt; struct completion tagset_freed; /* Area to keep a shared tag map */ struct blk_mq_tag_set tag_set; atomic_t host_blocked; unsigned int host_failed; /* commands that failed. protected by host_lock */ unsigned int host_eh_scheduled; /* EH scheduled without command */ unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ /* next two fields are used to bound the time spent in error handling */ int eh_deadline; unsigned long last_reset; /* * These three parameters can be used to allow for wide scsi, * and for host adapters that support multiple busses * The last two should be set to 1 more than the actual max id * or lun (e.g. 
8 for SCSI parallel systems). */ unsigned int max_channel; unsigned int max_id; u64 max_lun; /* * This is a unique identifier that must be assigned so that we * have some way of identifying each detected host adapter properly * and uniquely. For hosts that do not support more than one card * in the system at one time, this does not need to be set. It is * initialized to 0 in scsi_host_alloc. */ unsigned int unique_id; /* * The maximum length of SCSI commands that this host can accept. * Probably 12 for most host adapters, but could be 16 for others. * or 260 if the driver supports variable length cdbs. * For drivers that don't set this field, a value of 12 is * assumed. */ unsigned short max_cmd_len; int this_id; int can_queue; short cmd_per_lun; short unsigned int sg_tablesize; short unsigned int sg_prot_tablesize; unsigned int max_sectors; unsigned int opt_sectors; unsigned int max_segment_size; unsigned int dma_alignment; unsigned long dma_boundary; unsigned long virt_boundary_mask; /* * In scsi-mq mode, the number of hardware queues supported by the LLD. * * Note: it is assumed that each hardware queue has a queue depth of * can_queue. In other words, the total queue depth per host * is nr_hw_queues * can_queue. However, for when host_tagset is set, * the total queue depth is can_queue. */ unsigned nr_hw_queues; unsigned nr_maps; unsigned active_mode:2; /* * Host has requested that no further requests come through for the * time being. */ unsigned host_self_blocked:1; /* * Host uses correct SCSI ordering not PC ordering. The bit is * set for the minority of drivers whose authors actually read * the spec ;). */ unsigned reverse_ordering:1; /* Task mgmt function in progress */ unsigned tmf_in_progress:1; /* Asynchronous scan in progress */ unsigned async_scan:1; /* Don't resume host in EH */ unsigned eh_noresume:1; /* The controller does not support WRITE SAME */ unsigned no_write_same:1; /* True if the host uses host-wide tagspace */ unsigned host_tagset:1; /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */ unsigned queuecommand_may_block:1; /* Host responded with short (<36 bytes) INQUIRY result */ unsigned short_inquiry:1; /* The transport requires the LUN bits NOT to be stored in CDB[1] */ unsigned no_scsi2_lun_in_cdb:1; /* * Optional work queue to be utilized by the transport */ struct workqueue_struct *work_q; /* * Task management function work queue */ struct workqueue_struct *tmf_work_q; /* * Value host_blocked counts down from */ unsigned int max_host_blocked; /* Protection Information */ unsigned int prot_capabilities; unsigned char prot_guard_type; /* legacy crap */ unsigned long base; unsigned long io_port; unsigned char n_io_port; unsigned char dma_channel; unsigned int irq; enum scsi_host_state shost_state; /* ldm bits */ struct device shost_gendev, shost_dev; /* * Points to the transport data (if any) which is allocated * separately */ void *shost_data; /* * Points to the physical bus device we'd use to do DMA * Needed just in case we have virtual hosts. */ struct device *dma_dev; /* Delay for runtime autosuspend */ int rpm_autosuspend_delay; /* * We should ensure that this is aligned, both for better performance * and also because some compilers (m68k) don't automatically force * alignment to a long boundary. */ unsigned long hostdata[] /* Used for storage of host specific stuff */ __attribute__ ((aligned (sizeof(unsigned long)))); }; #define class_to_shost(d) \ container_of(d, struct Scsi_Host, shost_dev) #define shost_printk(prefix, shost, fmt, a...) 
\ dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a) static inline void *shost_priv(struct Scsi_Host *shost) { return (void *)shost->hostdata; } int scsi_is_host_device(const struct device *); static inline struct Scsi_Host *dev_to_shost(struct device *dev) { while (!scsi_is_host_device(dev)) { if (!dev->parent) return NULL; dev = dev->parent; } return container_of(dev, struct Scsi_Host, shost_gendev); } static inline int scsi_host_in_recovery(struct Scsi_Host *shost) { return shost->shost_state == SHOST_RECOVERY || shost->shost_state == SHOST_CANCEL_RECOVERY || shost->shost_state == SHOST_DEL_RECOVERY || shost->tmf_in_progress; } extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); extern void scsi_flush_work(struct Scsi_Host *); extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int); extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, struct device *, struct device *); #if defined(CONFIG_SCSI_PROC_FS) struct proc_dir_entry * scsi_template_proc_dir(const struct scsi_host_template *sht); #else #define scsi_template_proc_dir(sht) NULL #endif extern void scsi_scan_host(struct Scsi_Host *); extern int scsi_resume_device(struct scsi_device *sdev); extern int scsi_rescan_device(struct scsi_device *sdev); extern void scsi_remove_host(struct Scsi_Host *); extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); extern int scsi_host_busy(struct Scsi_Host *shost); extern void scsi_host_put(struct Scsi_Host *t); extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum); extern const char *scsi_host_state_name(enum scsi_host_state); extern void scsi_host_complete_all_commands(struct Scsi_Host *shost, enum scsi_host_status status); static inline int __must_check scsi_add_host(struct Scsi_Host *host, struct device *dev) { return scsi_add_host_with_dma(host, dev, dev); } static inline struct device *scsi_get_device(struct Scsi_Host *shost) { return shost->shost_gendev.parent; } /** * scsi_host_scan_allowed - Is scanning of this host allowed * @shost: Pointer to Scsi_Host. **/ static inline int scsi_host_scan_allowed(struct Scsi_Host *shost) { return shost->shost_state == SHOST_RUNNING || shost->shost_state == SHOST_RECOVERY; } extern void scsi_unblock_requests(struct Scsi_Host *); extern void scsi_block_requests(struct Scsi_Host *); extern int scsi_host_block(struct Scsi_Host *shost); extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state); void scsi_host_busy_iter(struct Scsi_Host *, bool (*fn)(struct scsi_cmnd *, void *), void *priv); struct class_container; /* * DIF defines the exchange of protection information between * initiator and SBC block device. * * DIX defines the exchange of protection information between OS and * initiator. */ enum scsi_host_prot_capabilities { SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */ SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */ SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */ SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */ SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */ SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */ SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */ }; /* * SCSI hosts which support the Data Integrity Extensions must * indicate their capabilities by setting the prot_capabilities using * this call. 
*/ static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask) { shost->prot_capabilities = mask; } static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost) { return shost->prot_capabilities; } static inline int scsi_host_prot_dma(struct Scsi_Host *shost) { return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION; } static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) { static unsigned char cap[] = { 0, SHOST_DIF_TYPE1_PROTECTION, SHOST_DIF_TYPE2_PROTECTION, SHOST_DIF_TYPE3_PROTECTION }; if (target_type >= ARRAY_SIZE(cap)) return 0; return shost->prot_capabilities & cap[target_type] ? target_type : 0; } static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type) { #if defined(CONFIG_BLK_DEV_INTEGRITY) static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION, SHOST_DIX_TYPE1_PROTECTION, SHOST_DIX_TYPE2_PROTECTION, SHOST_DIX_TYPE3_PROTECTION }; if (target_type >= ARRAY_SIZE(cap)) return 0; return shost->prot_capabilities & cap[target_type]; #endif return 0; } /* * All DIX-capable initiators must support the T10-mandated CRC * checksum. Controllers can optionally implement the IP checksum * scheme which has much lower impact on system performance. Note * that the main rationale for the checksum is to match integrity * metadata with data. Detecting bit errors are a job for ECC memory * and buses. */ enum scsi_host_guard_type { SHOST_DIX_GUARD_CRC = 1 << 0, SHOST_DIX_GUARD_IP = 1 << 1, }; static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type) { shost->prot_guard_type = type; } static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost) { return shost->prot_guard_type; } extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state); #endif /* _SCSI_SCSI_HOST_H */ |
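/*
 * Illustrative sketch, not part of <scsi/scsi_host.h>: how a hypothetical
 * low-level driver ("example_hba", invented for this example, as are the
 * numeric limits) might allocate a host, declare T10 DIF/DIX support with
 * scsi_host_set_prot()/scsi_host_set_guard() and register it with the
 * midlayer. A real driver would talk to hardware in its queuecommand
 * handler and complete commands with scsi_done().
 */
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-host state living in shost->hostdata[]. */
struct example_hba {
	void __iomem *regs;
};

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* A real driver would start the command here and later call scsi_done(). */
	return 0;
}

static const struct scsi_host_template example_sht = {
	.module		= THIS_MODULE,
	.name		= "example_hba",
	.queuecommand	= example_queuecommand,
	.this_id	= -1,
	.can_queue	= 64,
	.cmd_per_lun	= 8,
	.sg_tablesize	= SG_ALL,
};

static int example_probe(struct device *parent)
{
	struct Scsi_Host *shost;
	struct example_hba *hba;
	int error;

	shost = scsi_host_alloc(&example_sht, sizeof(*hba));
	if (!shost)
		return -ENOMEM;

	hba = shost_priv(shost);	/* points into shost->hostdata[] */
	hba->regs = NULL;		/* would be mapped from the device in a real driver */
	shost->max_id = 16;
	shost->max_lun = 256;

	/* Advertise DIF type 1 and DIX type 1, guarded by the T10 CRC. */
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIX_TYPE1_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(shost, parent);
	if (error)
		goto put_host;

	scsi_scan_host(shost);
	return 0;

put_host:
	scsi_host_put(shost);
	return error;
}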
// SPDX-License-Identifier: GPL-2.0-only /* * klist.c - Routines for manipulating klists. * * Copyright (C) 2005 Patrick Mochel * * This klist interface provides a couple of structures that wrap around * struct list_head to provide explicit list "head" (struct klist) and list * "node" (struct klist_node) objects. For struct klist, a spinlock is * included that protects access to the actual list itself. struct * klist_node provides a pointer to the klist that owns it and a kref * reference count that indicates the number of current users of that node * in the list. * * The entire point is to provide an interface for iterating over a list * that is safe and allows for modification of the list during the * iteration (e.g. insertion and removal), including modification of the * current node on the list. * * It works using a 3rd object type - struct klist_iter - that is declared * and initialized before an iteration. klist_next() is used to acquire the * next element in the list. It returns NULL if there are no more items. * Internally, that routine takes the klist's lock, decrements the * reference count of the previous klist_node and increments the count of * the next klist_node. It then drops the lock and returns. * * There are primitives for adding and removing nodes to/from a klist. * When deleting, klist_del() will simply decrement the reference count. * Only when the count goes to 0 is the node removed from the list. * klist_remove() will try to delete the node from the list and block until * it is actually removed. This is useful for objects (like devices) that * have been removed from the system and must be freed (but must wait until * all accessors have finished).
*/ #include <linux/klist.h> #include <linux/export.h> #include <linux/sched.h> /* * Use the lowest bit of n_klist to mark deleted nodes and exclude * dead ones from iteration. */ #define KNODE_DEAD 1LU #define KNODE_KLIST_MASK ~KNODE_DEAD static struct klist *knode_klist(struct klist_node *knode) { return (struct klist *) ((unsigned long)knode->n_klist & KNODE_KLIST_MASK); } static bool knode_dead(struct klist_node *knode) { return (unsigned long)knode->n_klist & KNODE_DEAD; } static void knode_set_klist(struct klist_node *knode, struct klist *klist) { knode->n_klist = klist; /* no knode deserves to start its life dead */ WARN_ON(knode_dead(knode)); } static void knode_kill(struct klist_node *knode) { /* and no knode should die twice ever either, see we're very humane */ WARN_ON(knode_dead(knode)); *(unsigned long *)&knode->n_klist |= KNODE_DEAD; } /** * klist_init - Initialize a klist structure. * @k: The klist we're initializing. * @get: The get function for the embedding object (NULL if none) * @put: The put function for the embedding object (NULL if none) * * Initialises the klist structure. If the klist_node structures are * going to be embedded in refcounted objects (necessary for safe * deletion) then the get/put arguments are used to initialise * functions that take and release references on the embedding * objects. */ void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)) { INIT_LIST_HEAD(&k->k_list); spin_lock_init(&k->k_lock); k->get = get; k->put = put; } EXPORT_SYMBOL_GPL(klist_init); static void add_head(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } static void add_tail(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add_tail(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } static void klist_node_init(struct klist *k, struct klist_node *n) { INIT_LIST_HEAD(&n->n_node); kref_init(&n->n_ref); knode_set_klist(n, k); if (k->get) k->get(n); } /** * klist_add_head - Initialize a klist_node and add it to front. * @n: node we're adding. * @k: klist it's going on. */ void klist_add_head(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_head(k, n); } EXPORT_SYMBOL_GPL(klist_add_head); /** * klist_add_tail - Initialize a klist_node and add it to back. * @n: node we're adding. * @k: klist it's going on. */ void klist_add_tail(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_tail(k, n); } EXPORT_SYMBOL_GPL(klist_add_tail); /** * klist_add_behind - Init a klist_node and add it after an existing node * @n: node we're adding. * @pos: node to put @n after */ void klist_add_behind(struct klist_node *n, struct klist_node *pos) { struct klist *k = knode_klist(pos); klist_node_init(k, n); spin_lock(&k->k_lock); list_add(&n->n_node, &pos->n_node); spin_unlock(&k->k_lock); } EXPORT_SYMBOL_GPL(klist_add_behind); /** * klist_add_before - Init a klist_node and add it before an existing node * @n: node we're adding. 
* @pos: node to put @n after */ void klist_add_before(struct klist_node *n, struct klist_node *pos) { struct klist *k = knode_klist(pos); klist_node_init(k, n); spin_lock(&k->k_lock); list_add_tail(&n->n_node, &pos->n_node); spin_unlock(&k->k_lock); } EXPORT_SYMBOL_GPL(klist_add_before); struct klist_waiter { struct list_head list; struct klist_node *node; struct task_struct *process; int woken; }; static DEFINE_SPINLOCK(klist_remove_lock); static LIST_HEAD(klist_remove_waiters); static void klist_release(struct kref *kref) { struct klist_waiter *waiter, *tmp; struct klist_node *n = container_of(kref, struct klist_node, n_ref); WARN_ON(!knode_dead(n)); list_del(&n->n_node); spin_lock(&klist_remove_lock); list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { if (waiter->node != n) continue; list_del(&waiter->list); waiter->woken = 1; mb(); wake_up_process(waiter->process); } spin_unlock(&klist_remove_lock); knode_set_klist(n, NULL); } static int klist_dec_and_del(struct klist_node *n) { return kref_put(&n->n_ref, klist_release); } static void klist_put(struct klist_node *n, bool kill) { struct klist *k = knode_klist(n); void (*put)(struct klist_node *) = k->put; spin_lock(&k->k_lock); if (kill) knode_kill(n); if (!klist_dec_and_del(n)) put = NULL; spin_unlock(&k->k_lock); if (put) put(n); } /** * klist_del - Decrement the reference count of node and try to remove. * @n: node we're deleting. */ void klist_del(struct klist_node *n) { klist_put(n, true); } EXPORT_SYMBOL_GPL(klist_del); /** * klist_remove - Decrement the refcount of node and wait for it to go away. * @n: node we're removing. */ void klist_remove(struct klist_node *n) { struct klist_waiter waiter; waiter.node = n; waiter.process = current; waiter.woken = 0; spin_lock(&klist_remove_lock); list_add(&waiter.list, &klist_remove_waiters); spin_unlock(&klist_remove_lock); klist_del(n); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (waiter.woken) break; schedule(); } __set_current_state(TASK_RUNNING); } EXPORT_SYMBOL_GPL(klist_remove); /** * klist_node_attached - Say whether a node is bound to a list or not. * @n: Node that we're testing. */ int klist_node_attached(struct klist_node *n) { return (n->n_klist != NULL); } EXPORT_SYMBOL_GPL(klist_node_attached); /** * klist_iter_init_node - Initialize a klist_iter structure. * @k: klist we're iterating. * @i: klist_iter we're filling. * @n: node to start with. * * Similar to klist_iter_init(), but starts the action off with @n, * instead of with the list head. */ void klist_iter_init_node(struct klist *k, struct klist_iter *i, struct klist_node *n) { i->i_klist = k; i->i_cur = NULL; if (n && kref_get_unless_zero(&n->n_ref)) i->i_cur = n; } EXPORT_SYMBOL_GPL(klist_iter_init_node); /** * klist_iter_init - Iniitalize a klist_iter structure. * @k: klist we're iterating. * @i: klist_iter structure we're filling. * * Similar to klist_iter_init_node(), but start with the list head. */ void klist_iter_init(struct klist *k, struct klist_iter *i) { klist_iter_init_node(k, i, NULL); } EXPORT_SYMBOL_GPL(klist_iter_init); /** * klist_iter_exit - Finish a list iteration. * @i: Iterator structure. * * Must be called when done iterating over list, as it decrements the * refcount of the current node. Necessary in case iteration exited before * the end of the list was reached, and always good form. 
*/ void klist_iter_exit(struct klist_iter *i) { if (i->i_cur) { klist_put(i->i_cur, false); i->i_cur = NULL; } } EXPORT_SYMBOL_GPL(klist_iter_exit); static struct klist_node *to_klist_node(struct list_head *n) { return container_of(n, struct klist_node, n_node); } /** * klist_prev - Ante up prev node in list. * @i: Iterator structure. * * First grab list lock. Decrement the reference count of the previous * node, if there was one. Grab the prev node, increment its reference * count, drop the lock, and return that prev node. */ struct klist_node *klist_prev(struct klist_iter *i) { void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *prev; unsigned long flags; spin_lock_irqsave(&i->i_klist->k_lock, flags); if (last) { prev = to_klist_node(last->n_node.prev); if (!klist_dec_and_del(last)) put = NULL; } else prev = to_klist_node(i->i_klist->k_list.prev); i->i_cur = NULL; while (prev != to_klist_node(&i->i_klist->k_list)) { if (likely(!knode_dead(prev))) { kref_get(&prev->n_ref); i->i_cur = prev; break; } prev = to_klist_node(prev->n_node.prev); } spin_unlock_irqrestore(&i->i_klist->k_lock, flags); if (put && last) put(last); return i->i_cur; } EXPORT_SYMBOL_GPL(klist_prev); /** * klist_next - Ante up next node in list. * @i: Iterator structure. * * First grab list lock. Decrement the reference count of the previous * node, if there was one. Grab the next node, increment its reference * count, drop the lock, and return that next node. */ struct klist_node *klist_next(struct klist_iter *i) { void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *next; unsigned long flags; spin_lock_irqsave(&i->i_klist->k_lock, flags); if (last) { next = to_klist_node(last->n_node.next); if (!klist_dec_and_del(last)) put = NULL; } else next = to_klist_node(i->i_klist->k_list.next); i->i_cur = NULL; while (next != to_klist_node(&i->i_klist->k_list)) { if (likely(!knode_dead(next))) { kref_get(&next->n_ref); i->i_cur = next; break; } next = to_klist_node(next->n_node.next); } spin_unlock_irqrestore(&i->i_klist->k_lock, flags); if (put && last) put(last); return i->i_cur; } EXPORT_SYMBOL_GPL(klist_next); |
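/*
 * Illustrative sketch, not part of klist.c: a minimal user of the interface
 * above. The "widget" structure, widget_list and widget_example() are
 * invented for this example; a typical user embeds a klist_node in its own
 * object, much as the driver core does for its device and driver lists.
 */
#include <linux/container_of.h>
#include <linux/klist.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct widget {
	int id;
	struct klist_node node;
};

static struct klist widget_list;

static void widget_example(void)
{
	struct klist_iter iter;
	struct klist_node *kn;
	struct widget *w;

	/* No get/put callbacks: nodes are not tied to refcounted objects here. */
	klist_init(&widget_list, NULL, NULL);

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return;
	w->id = 1;
	klist_add_tail(&w->node, &widget_list);

	/*
	 * The iterator pins the current node, so the list may be modified
	 * (nodes added or removed) while the walk is in progress.
	 */
	klist_iter_init(&widget_list, &iter);
	while ((kn = klist_next(&iter)) != NULL) {
		struct widget *cur = container_of(kn, struct widget, node);

		pr_info("widget %d\n", cur->id);
	}
	klist_iter_exit(&iter);

	/* Unlink; the node goes away once its last reference is dropped. */
	klist_del(&w->node);
}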
2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 | // SPDX-License-Identifier: GPL-2.0-or-later /* * uvc_video.c -- USB Video Class driver - Video handling * * Copyright (C) 2005-2010 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #include <linux/dma-mapping.h> #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/atomic.h> #include <linux/unaligned.h> #include <media/jpeg.h> #include <media/v4l2-common.h> #include "uvcvideo.h" /* ------------------------------------------------------------------------ * UVC Controls */ static int __uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit, u8 intfnum, u8 cs, void *data, u16 size, int timeout) { u8 type = USB_TYPE_CLASS | USB_RECIP_INTERFACE; unsigned int pipe; pipe = (query & 0x80) ? usb_rcvctrlpipe(dev->udev, 0) : usb_sndctrlpipe(dev->udev, 0); type |= (query & 0x80) ? USB_DIR_IN : USB_DIR_OUT; return usb_control_msg(dev->udev, pipe, query, type, cs << 8, unit << 8 | intfnum, data, size, timeout); } static const char *uvc_query_name(u8 query) { switch (query) { case UVC_SET_CUR: return "SET_CUR"; case UVC_GET_CUR: return "GET_CUR"; case UVC_GET_MIN: return "GET_MIN"; case UVC_GET_MAX: return "GET_MAX"; case UVC_GET_RES: return "GET_RES"; case UVC_GET_LEN: return "GET_LEN"; case UVC_GET_INFO: return "GET_INFO"; case UVC_GET_DEF: return "GET_DEF"; default: return "<invalid>"; } } int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit, u8 intfnum, u8 cs, void *data, u16 size) { int ret; u8 error; u8 tmp; ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size, UVC_CTRL_CONTROL_TIMEOUT); if (likely(ret == size)) return 0; /* * Some devices return shorter USB control packets than expected if the * returned value can fit in less bytes. Zero all the bytes that the * device has not written. * * This quirk is applied to all controls, regardless of their data type. * Most controls are little-endian integers, in which case the missing * bytes become 0 MSBs. For other data types, a different heuristic * could be implemented if a device is found needing it. * * We exclude UVC_GET_INFO from the quirk. UVC_GET_LEN does not need * to be excluded because its size is always 1. */ if (ret > 0 && query != UVC_GET_INFO) { memset(data + ret, 0, size - ret); dev_warn_once(&dev->udev->dev, "UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n", uvc_query_name(query), cs, unit, ret, size); return 0; } if (ret != -EPIPE) { dev_err(&dev->udev->dev, "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n", uvc_query_name(query), cs, unit, ret, size); return ret < 0 ? ret : -EPIPE; } /* Reuse data[0] to request the error code. */ tmp = *(u8 *)data; ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum, UVC_VC_REQUEST_ERROR_CODE_CONTROL, data, 1, UVC_CTRL_CONTROL_TIMEOUT); error = *(u8 *)data; *(u8 *)data = tmp; if (ret != 1) { dev_err_ratelimited(&dev->udev->dev, "Failed to query (%s) UVC error code control %u on unit %u: %d (exp. 1).\n", uvc_query_name(query), cs, unit, ret); return ret < 0 ? 
ret : -EPIPE; } uvc_dbg(dev, CONTROL, "Control error %u\n", error); switch (error) { case 0: /* Cannot happen - we received a STALL */ return -EPIPE; case 1: /* Not ready */ return -EBUSY; case 2: /* Wrong state */ return -EACCES; case 3: /* Power */ return -EREMOTE; case 4: /* Out of range */ return -ERANGE; case 5: /* Invalid unit */ case 6: /* Invalid control */ case 7: /* Invalid Request */ /* * The firmware has not properly implemented * the control or there has been a HW error. */ return -EIO; case 8: /* Invalid value within range */ return -EINVAL; default: /* reserved or unknown */ break; } return -EPIPE; } static const struct usb_device_id elgato_cam_link_4k = { USB_DEVICE(0x0fd9, 0x0066) }; static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl) { const struct uvc_format *format = NULL; const struct uvc_frame *frame = NULL; unsigned int i; /* * The response of the Elgato Cam Link 4K is incorrect: The second byte * contains bFormatIndex (instead of being the second byte of bmHint). * The first byte is always zero. The third byte is always 1. * * The UVC 1.5 class specification defines the first five bits in the * bmHint bitfield. The remaining bits are reserved and should be zero. * Therefore a valid bmHint will be less than 32. * * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix. * MCU: 20.02.19, FPGA: 67 */ if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) && ctrl->bmHint > 255) { u8 corrected_format_index = ctrl->bmHint >> 8; uvc_dbg(stream->dev, VIDEO, "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n", ctrl->bmHint, ctrl->bFormatIndex, 1, corrected_format_index); ctrl->bmHint = 1; ctrl->bFormatIndex = corrected_format_index; } for (i = 0; i < stream->nformats; ++i) { if (stream->formats[i].index == ctrl->bFormatIndex) { format = &stream->formats[i]; break; } } if (format == NULL) return; for (i = 0; i < format->nframes; ++i) { if (format->frames[i].bFrameIndex == ctrl->bFrameIndex) { frame = &format->frames[i]; break; } } if (frame == NULL) return; if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) || (ctrl->dwMaxVideoFrameSize == 0 && stream->dev->uvc_version < 0x0110)) ctrl->dwMaxVideoFrameSize = frame->dwMaxVideoFrameBufferSize; /* * The "TOSHIBA Web Camera - 5M" Chicony device (04f2:b50b) seems to * compute the bandwidth on 16 bits and erroneously sign-extend it to * 32 bits, resulting in a huge bandwidth value. Detect and fix that * condition by setting the 16 MSBs to 0 when they're all equal to 1. */ if ((ctrl->dwMaxPayloadTransferSize & 0xffff0000) == 0xffff0000) ctrl->dwMaxPayloadTransferSize &= ~0xffff0000; if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) && stream->dev->quirks & UVC_QUIRK_FIX_BANDWIDTH && stream->intf->num_altsetting > 1) { u32 interval; u32 bandwidth; interval = (ctrl->dwFrameInterval > 100000) ? ctrl->dwFrameInterval : frame->dwFrameInterval[0]; /* * Compute a bandwidth estimation by multiplying the frame * size by the number of video frames per second, divide the * result by the number of USB frames (or micro-frames for * high- and super-speed devices) per second and add the UVC * header size (assumed to be 12 bytes long). */ bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp; bandwidth *= 10000000 / interval + 1; bandwidth /= 1000; if (stream->dev->udev->speed >= USB_SPEED_HIGH) bandwidth /= 8; bandwidth += 12; /* * The bandwidth estimate is too low for many cameras. 
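 *
 * As a rough, illustrative calculation (the figures are hypothetical, not
 * taken from a particular device): a 160x120 uncompressed YUY2 stream
 * (16 bpp) at 30 fps, i.e. a dwFrameInterval of 333333 in 100 ns units,
 * gives 160 * 120 / 8 * 16 = 38400 bytes per frame, times
 * 10000000 / 333333 + 1 = 31, divided by 1000 USB frames per second, or
 * 1190 bytes per frame slot; on a high-speed link that is 148 bytes per
 * micro-frame, 160 bytes once the 12 byte header is added.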
Don't use * maximum packet sizes lower than 1024 bytes to try and work * around the problem. According to measurements done on two * different camera models, the value is high enough to get most * resolutions working while not preventing two simultaneous * VGA streams at 15 fps. */ bandwidth = max_t(u32, bandwidth, 1024); ctrl->dwMaxPayloadTransferSize = bandwidth; } } static size_t uvc_video_ctrl_size(struct uvc_streaming *stream) { /* * Return the size of the video probe and commit controls, which depends * on the protocol version. */ if (stream->dev->uvc_version < 0x0110) return 26; else if (stream->dev->uvc_version < 0x0150) return 34; else return 48; } static int uvc_get_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl, int probe, u8 query) { u16 size = uvc_video_ctrl_size(stream); u8 *data; int ret; if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) && query == UVC_GET_DEF) return -EIO; data = kmalloc(size, GFP_KERNEL); if (data == NULL) return -ENOMEM; ret = __uvc_query_ctrl(stream->dev, query, 0, stream->intfnum, probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data, size, uvc_timeout_param); if ((query == UVC_GET_MIN || query == UVC_GET_MAX) && ret == 2) { /* * Some cameras, mostly based on Bison Electronics chipsets, * answer a GET_MIN or GET_MAX request with the wCompQuality * field only. */ uvc_warn_once(stream->dev, UVC_WARN_MINMAX, "UVC non " "compliance - GET_MIN/MAX(PROBE) incorrectly " "supported. Enabling workaround.\n"); memset(ctrl, 0, sizeof(*ctrl)); ctrl->wCompQuality = le16_to_cpup((__le16 *)data); ret = 0; goto out; } else if (query == UVC_GET_DEF && probe == 1 && ret != size) { /* * Many cameras don't support the GET_DEF request on their * video probe control. Warn once and return, the caller will * fall back to GET_CUR. */ uvc_warn_once(stream->dev, UVC_WARN_PROBE_DEF, "UVC non " "compliance - GET_DEF(PROBE) not supported. " "Enabling workaround.\n"); ret = -EIO; goto out; } else if (ret != size) { dev_err(&stream->intf->dev, "Failed to query (%s) UVC %s control : %d (exp. %u).\n", uvc_query_name(query), probe ? "probe" : "commit", ret, size); ret = (ret == -EPROTO) ? -EPROTO : -EIO; goto out; } ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]); ctrl->bFormatIndex = data[2]; ctrl->bFrameIndex = data[3]; ctrl->dwFrameInterval = le32_to_cpup((__le32 *)&data[4]); ctrl->wKeyFrameRate = le16_to_cpup((__le16 *)&data[8]); ctrl->wPFrameRate = le16_to_cpup((__le16 *)&data[10]); ctrl->wCompQuality = le16_to_cpup((__le16 *)&data[12]); ctrl->wCompWindowSize = le16_to_cpup((__le16 *)&data[14]); ctrl->wDelay = le16_to_cpup((__le16 *)&data[16]); ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]); ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]); if (size >= 34) { ctrl->dwClockFrequency = get_unaligned_le32(&data[26]); ctrl->bmFramingInfo = data[30]; ctrl->bPreferedVersion = data[31]; ctrl->bMinVersion = data[32]; ctrl->bMaxVersion = data[33]; } else { ctrl->dwClockFrequency = stream->dev->clock_frequency; ctrl->bmFramingInfo = 0; ctrl->bPreferedVersion = 0; ctrl->bMinVersion = 0; ctrl->bMaxVersion = 0; } /* * Some broken devices return null or wrong dwMaxVideoFrameSize and * dwMaxPayloadTransferSize fields. Try to get the value from the * format and frame descriptors. 
*/ uvc_fixup_video_ctrl(stream, ctrl); ret = 0; out: kfree(data); return ret; } static int uvc_set_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl, int probe) { u16 size = uvc_video_ctrl_size(stream); u8 *data; int ret; data = kzalloc(size, GFP_KERNEL); if (data == NULL) return -ENOMEM; *(__le16 *)&data[0] = cpu_to_le16(ctrl->bmHint); data[2] = ctrl->bFormatIndex; data[3] = ctrl->bFrameIndex; *(__le32 *)&data[4] = cpu_to_le32(ctrl->dwFrameInterval); *(__le16 *)&data[8] = cpu_to_le16(ctrl->wKeyFrameRate); *(__le16 *)&data[10] = cpu_to_le16(ctrl->wPFrameRate); *(__le16 *)&data[12] = cpu_to_le16(ctrl->wCompQuality); *(__le16 *)&data[14] = cpu_to_le16(ctrl->wCompWindowSize); *(__le16 *)&data[16] = cpu_to_le16(ctrl->wDelay); put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]); put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]); if (size >= 34) { put_unaligned_le32(ctrl->dwClockFrequency, &data[26]); data[30] = ctrl->bmFramingInfo; data[31] = ctrl->bPreferedVersion; data[32] = ctrl->bMinVersion; data[33] = ctrl->bMaxVersion; } ret = __uvc_query_ctrl(stream->dev, UVC_SET_CUR, 0, stream->intfnum, probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data, size, uvc_timeout_param); if (ret != size) { dev_err(&stream->intf->dev, "Failed to set UVC %s control : %d (exp. %u).\n", probe ? "probe" : "commit", ret, size); ret = -EIO; } kfree(data); return ret; } int uvc_probe_video(struct uvc_streaming *stream, struct uvc_streaming_control *probe) { struct uvc_streaming_control probe_min, probe_max; unsigned int i; int ret; /* * Perform probing. The device should adjust the requested values * according to its capabilities. However, some devices, namely the * first generation UVC Logitech webcams, don't implement the Video * Probe control properly, and just return the needed bandwidth. For * that reason, if the needed bandwidth exceeds the maximum available * bandwidth, try to lower the quality. */ ret = uvc_set_video_ctrl(stream, probe, 1); if (ret < 0) goto done; /* Get the minimum and maximum values for compression settings. 
*/ if (!(stream->dev->quirks & UVC_QUIRK_PROBE_MINMAX)) { ret = uvc_get_video_ctrl(stream, &probe_min, 1, UVC_GET_MIN); if (ret < 0) goto done; ret = uvc_get_video_ctrl(stream, &probe_max, 1, UVC_GET_MAX); if (ret < 0) goto done; probe->wCompQuality = probe_max.wCompQuality; } for (i = 0; i < 2; ++i) { ret = uvc_set_video_ctrl(stream, probe, 1); if (ret < 0) goto done; ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR); if (ret < 0) goto done; if (stream->intf->num_altsetting == 1) break; if (probe->dwMaxPayloadTransferSize <= stream->maxpsize) break; if (stream->dev->quirks & UVC_QUIRK_PROBE_MINMAX) { ret = -ENOSPC; goto done; } /* TODO: negotiate compression parameters */ probe->wKeyFrameRate = probe_min.wKeyFrameRate; probe->wPFrameRate = probe_min.wPFrameRate; probe->wCompQuality = probe_max.wCompQuality; probe->wCompWindowSize = probe_min.wCompWindowSize; } done: return ret; } static int uvc_commit_video(struct uvc_streaming *stream, struct uvc_streaming_control *probe) { return uvc_set_video_ctrl(stream, probe, 0); } /* ----------------------------------------------------------------------------- * Clocks and timestamps */ static inline ktime_t uvc_video_get_time(void) { if (uvc_clock_param == CLOCK_MONOTONIC) return ktime_get(); else return ktime_get_real(); } static void uvc_video_clock_add_sample(struct uvc_clock *clock, const struct uvc_clock_sample *sample) { unsigned long flags; /* * If we write new data on the position where we had the last * overflow, remove the overflow pointer. There is no SOF overflow * in the whole circular buffer. */ if (clock->head == clock->last_sof_overflow) clock->last_sof_overflow = -1; spin_lock_irqsave(&clock->lock, flags); if (clock->count > 0 && clock->last_sof > sample->dev_sof) { /* * Remove data from the circular buffer that is older than the * last SOF overflow. We only support one SOF overflow per * circular buffer. */ if (clock->last_sof_overflow != -1) clock->count = (clock->head - clock->last_sof_overflow + clock->size) % clock->size; clock->last_sof_overflow = clock->head; } /* Add sample. */ clock->samples[clock->head] = *sample; clock->head = (clock->head + 1) % clock->size; clock->count = min(clock->count + 1, clock->size); spin_unlock_irqrestore(&clock->lock, flags); } static void uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, const u8 *data, int len) { struct uvc_clock_sample sample; unsigned int header_size; bool has_pts = false; bool has_scr = false; switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) { case UVC_STREAM_PTS | UVC_STREAM_SCR: header_size = 12; has_pts = true; has_scr = true; break; case UVC_STREAM_PTS: header_size = 6; has_pts = true; break; case UVC_STREAM_SCR: header_size = 8; has_scr = true; break; default: header_size = 2; break; } /* Check for invalid headers. */ if (len < header_size) return; /* * Extract the timestamps: * * - store the frame PTS in the buffer structure * - if the SCR field is present, retrieve the host SOF counter and * kernel timestamps and store them with the SCR STC and SOF fields * in the ring buffer */ if (has_pts && buf != NULL) buf->pts = get_unaligned_le32(&data[2]); if (!has_scr) return; /* * To limit the amount of data, drop SCRs with an SOF identical to the * previous one. This filtering is also needed to support UVC 1.5, where * all the data packets of the same frame contains the same SOF. In that * case only the first one will match the host_sof. 
*/ sample.dev_sof = get_unaligned_le16(&data[header_size - 2]); if (sample.dev_sof == stream->clock.last_sof) return; sample.dev_stc = get_unaligned_le32(&data[header_size - 6]); /* * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5 * standard states that it "must be captured when the first video data * of a video frame is put on the USB bus". This is generally understood * as requiring devices to clear the payload header's SCR bit before * the first packet containing video data. * * Most vendors follow that interpretation, but some (namely SunplusIT * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR * field with 0's,and expect that the driver only processes the SCR if * there is data in the packet. * * Ignore all the hardware timestamp information if we haven't received * any data for this frame yet, the packet contains no data, and both * STC and SOF are zero. This heuristics should be safe on compliant * devices. This should be safe with compliant devices, as in the very * unlikely case where a UVC 1.1 device would send timing information * only before the first packet containing data, and both STC and SOF * happen to be zero for a particular frame, we would only miss one * clock sample from many and the clock recovery algorithm wouldn't * suffer from this condition. */ if (buf && buf->bytesused == 0 && len == header_size && sample.dev_stc == 0 && sample.dev_sof == 0) return; sample.host_sof = usb_get_current_frame_number(stream->dev->udev); /* * On some devices, like the Logitech C922, the device SOF does not run * at a stable rate of 1kHz. For those devices use the host SOF instead. * In the tests performed so far, this improves the timestamp precision. * This is probably explained by a small packet handling jitter from the * host, but the exact reason hasn't been fully determined. */ if (stream->dev->quirks & UVC_QUIRK_INVALID_DEVICE_SOF) sample.dev_sof = sample.host_sof; sample.host_time = uvc_video_get_time(); /* * The UVC specification allows device implementations that can't obtain * the USB frame number to keep their own frame counters as long as they * match the size and frequency of the frame number associated with USB * SOF tokens. The SOF values sent by such devices differ from the USB * SOF tokens by a fixed offset that needs to be estimated and accounted * for to make timestamp recovery as accurate as possible. * * The offset is estimated the first time a device SOF value is received * as the difference between the host and device SOF values. As the two * SOF values can differ slightly due to transmission delays, consider * that the offset is null if the difference is not higher than 10 ms * (negative differences can not happen and are thus considered as an * offset). The video commit control wDelay field should be used to * compute a dynamic threshold instead of using a fixed 10 ms value, but * devices don't report reliable wDelay values. * * See uvc_video_clock_host_sof() for an explanation regarding why only * the 8 LSBs of the delta are kept. 
*/ if (stream->clock.sof_offset == (u16)-1) { u16 delta_sof = (sample.host_sof - sample.dev_sof) & 255; if (delta_sof >= 10) stream->clock.sof_offset = delta_sof; else stream->clock.sof_offset = 0; } sample.dev_sof = (sample.dev_sof + stream->clock.sof_offset) & 2047; uvc_video_clock_add_sample(&stream->clock, &sample); stream->clock.last_sof = sample.dev_sof; } static void uvc_video_clock_reset(struct uvc_clock *clock) { clock->head = 0; clock->count = 0; clock->last_sof = -1; clock->last_sof_overflow = -1; clock->sof_offset = -1; } static int uvc_video_clock_init(struct uvc_clock *clock) { spin_lock_init(&clock->lock); clock->size = 32; clock->samples = kmalloc_array(clock->size, sizeof(*clock->samples), GFP_KERNEL); if (clock->samples == NULL) return -ENOMEM; uvc_video_clock_reset(clock); return 0; } static void uvc_video_clock_cleanup(struct uvc_clock *clock) { kfree(clock->samples); clock->samples = NULL; } /* * uvc_video_clock_host_sof - Return the host SOF value for a clock sample * * Host SOF counters reported by usb_get_current_frame_number() usually don't * cover the whole 11-bits SOF range (0-2047) but are limited to the HCI frame * schedule window. They can be limited to 8, 9 or 10 bits depending on the host * controller and its configuration. * * We thus need to recover the SOF value corresponding to the host frame number. * As the device and host frame numbers are sampled in a short interval, the * difference between their values should be equal to a small delta plus an * integer multiple of 256 caused by the host frame number limited precision. * * To obtain the recovered host SOF value, compute the small delta by masking * the high bits of the host frame counter and device SOF difference and add it * to the device SOF value. */ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample) { /* The delta value can be negative. */ s8 delta_sof; delta_sof = (sample->host_sof - sample->dev_sof) & 255; return (sample->dev_sof + delta_sof) & 2047; } /* * uvc_video_clock_update - Update the buffer timestamp * * This function converts the buffer PTS timestamp to the host clock domain by * going through the USB SOF clock domain and stores the result in the V4L2 * buffer timestamp field. * * The relationship between the device clock and the host clock isn't known. * However, the device and the host share the common USB SOF clock which can be * used to recover that relationship. * * The relationship between the device clock and the USB SOF clock is considered * to be linear over the clock samples sliding window and is given by * * SOF = m * PTS + p * * Several methods to compute the slope (m) and intercept (p) can be used. As * the clock drift should be small compared to the sliding window size, we * assume that the line that goes through the points at both ends of the window * is a good approximation. Naming those points P1 and P2, we get * * SOF = (SOF2 - SOF1) / (STC2 - STC1) * PTS * + (SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) * * or * * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1) * * to avoid losing precision in the division. Similarly, the host timestamp is * computed with * * TS = ((TS2 - TS1) * SOF + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2) * * SOF values are coded on 11 bits by USB. We extend their precision with 16 * decimal bits, leading to a 11.16 coding. 
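 *
 * As a purely illustrative example with invented numbers: with window
 * endpoints (STC1, SOF1) = (1000, 100) and (STC2, SOF2) = (2000, 200), a
 * buffer PTS of 1500 gives through (1)
 *
 *   SOF = ((200 - 100) * 1500 + 100 * 2000 - 200 * 1000) / (2000 - 1000)
 *       = 150
 *
 * i.e. the buffer lands halfway between the two samples, as expected for a
 * PTS halfway between STC1 and STC2. In the code the SOF terms additionally
 * carry the 11.16 fixed-point scaling and the 2048 offset described below.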
* * TODO: To avoid surprises with device clock values, PTS/STC timestamps should * be normalized using the nominal device clock frequency reported through the * UVC descriptors. * * Both the PTS/STC and SOF counters roll over, after a fixed but device * specific amount of time for PTS/STC and after 2048ms for SOF. As long as the * sliding window size is smaller than the rollover period, differences computed * on unsigned integers will produce the correct result. However, the p term in * the linear relations will be miscomputed. * * To fix the issue, we subtract a constant from the PTS and STC values to bring * PTS to half the 32 bit STC range. The sliding window STC values then fit into * the 32 bit range without any rollover. * * Similarly, we add 2048 to the device SOF values to make sure that the SOF * computed by (1) will never be smaller than 0. This offset is then compensated * by adding 2048 to the SOF values used in (2). However, this doesn't prevent * rollovers between (1) and (2): the SOF value computed by (1) can be slightly * lower than 4096, and the host SOF counters can have rolled over to 2048. This * case is handled by subtracting 2048 from the SOF value if it exceeds the host * SOF value at the end of the sliding window. * * Finally we subtract a constant from the host timestamps to bring the first * timestamp of the sliding window to 1s. */ void uvc_video_clock_update(struct uvc_streaming *stream, struct vb2_v4l2_buffer *vbuf, struct uvc_buffer *buf) { struct uvc_clock *clock = &stream->clock; struct uvc_clock_sample *first; struct uvc_clock_sample *last; unsigned long flags; u64 timestamp; u32 delta_stc; u32 y1; u32 x1, x2; u32 mean; u32 sof; u64 y, y2; if (!uvc_hw_timestamps_param) return; /* * We will get called from __vb2_queue_cancel() if there are buffers * done but not dequeued by the user, but the sample array has already * been released at that time. Just bail out in that case. */ if (!clock->samples) return; spin_lock_irqsave(&clock->lock, flags); if (clock->count < 2) goto done; first = &clock->samples[(clock->head - clock->count + clock->size) % clock->size]; last = &clock->samples[(clock->head - 1 + clock->size) % clock->size]; /* First step, PTS to SOF conversion. */ delta_stc = buf->pts - (1UL << 31); x1 = first->dev_stc - delta_stc; x2 = last->dev_stc - delta_stc; if (x1 == x2) goto done; y1 = (first->dev_sof + 2048) << 16; y2 = (last->dev_sof + 2048) << 16; if (y2 < y1) y2 += 2048 << 16; /* * Have at least 1/4 of a second of timestamps before we * try to do any calculation. Otherwise we do not have enough * precision. This value was determined by running Android CTS * on different devices. * * dev_sof runs at 1KHz, and we have a fixed point precision of * 16 bits. */ if ((y2 - y1) < ((1000 / 4) << 16)) goto done; y = (u64)(y2 - y1) * (1ULL << 31) + (u64)y1 * (u64)x2 - (u64)y2 * (u64)x1; y = div_u64(y, x2 - x1); sof = y; uvc_dbg(stream->dev, CLOCK, "%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %llu SOF offset %u)\n", stream->dev->name, buf->pts, y >> 16, div_u64((y & 0xffff) * 1000000, 65536), sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), x1, x2, y1, y2, clock->sof_offset); /* Second step, SOF to host clock conversion. 
*/ x1 = (uvc_video_clock_host_sof(first) + 2048) << 16; x2 = (uvc_video_clock_host_sof(last) + 2048) << 16; if (x2 < x1) x2 += 2048 << 16; if (x1 == x2) goto done; y1 = NSEC_PER_SEC; y2 = ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1; /* * Interpolated and host SOF timestamps can wrap around at slightly * different times. Handle this by adding or removing 2048 to or from * the computed SOF value to keep it close to the SOF samples mean * value. */ mean = (x1 + x2) / 2; if (mean - (1024 << 16) > sof) sof += 2048 << 16; else if (sof > mean + (1024 << 16)) sof -= 2048 << 16; y = (u64)(y2 - y1) * (u64)sof + (u64)y1 * (u64)x2 - (u64)y2 * (u64)x1; y = div_u64(y, x2 - x1); timestamp = ktime_to_ns(first->host_time) + y - y1; uvc_dbg(stream->dev, CLOCK, "%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %llu)\n", stream->dev->name, sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), y, timestamp, vbuf->vb2_buf.timestamp, x1, first->host_sof, first->dev_sof, x2, last->host_sof, last->dev_sof, y1, y2); /* Update the V4L2 buffer. */ vbuf->vb2_buf.timestamp = timestamp; done: spin_unlock_irqrestore(&clock->lock, flags); } /* ------------------------------------------------------------------------ * Stream statistics */ static void uvc_video_stats_decode(struct uvc_streaming *stream, const u8 *data, int len) { unsigned int header_size; bool has_pts = false; bool has_scr = false; u16 scr_sof; u32 scr_stc; u32 pts; if (stream->stats.stream.nb_frames == 0 && stream->stats.frame.nb_packets == 0) stream->stats.stream.start_ts = ktime_get(); switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) { case UVC_STREAM_PTS | UVC_STREAM_SCR: header_size = 12; has_pts = true; has_scr = true; break; case UVC_STREAM_PTS: header_size = 6; has_pts = true; break; case UVC_STREAM_SCR: header_size = 8; has_scr = true; break; default: header_size = 2; break; } /* Check for invalid headers. */ if (len < header_size || data[0] < header_size) { stream->stats.frame.nb_invalid++; return; } /* Extract the timestamps. */ if (has_pts) pts = get_unaligned_le32(&data[2]); if (has_scr) { scr_stc = get_unaligned_le32(&data[header_size - 6]); scr_sof = get_unaligned_le16(&data[header_size - 2]); } /* Is PTS constant through the whole frame ? */ if (has_pts && stream->stats.frame.nb_pts) { if (stream->stats.frame.pts != pts) { stream->stats.frame.nb_pts_diffs++; stream->stats.frame.last_pts_diff = stream->stats.frame.nb_packets; } } if (has_pts) { stream->stats.frame.nb_pts++; stream->stats.frame.pts = pts; } /* * Do all frames have a PTS in their first non-empty packet, or before * their first empty packet ? */ if (stream->stats.frame.size == 0) { if (len > header_size) stream->stats.frame.has_initial_pts = has_pts; if (len == header_size && has_pts) stream->stats.frame.has_early_pts = true; } /* Do the SCR.STC and SCR.SOF fields vary through the frame ? */ if (has_scr && stream->stats.frame.nb_scr) { if (stream->stats.frame.scr_stc != scr_stc) stream->stats.frame.nb_scr_diffs++; } if (has_scr) { /* Expand the SOF counter to 32 bits and store its value. 
*/ if (stream->stats.stream.nb_frames > 0 || stream->stats.frame.nb_scr > 0) stream->stats.stream.scr_sof_count += (scr_sof - stream->stats.stream.scr_sof) % 2048; stream->stats.stream.scr_sof = scr_sof; stream->stats.frame.nb_scr++; stream->stats.frame.scr_stc = scr_stc; stream->stats.frame.scr_sof = scr_sof; if (scr_sof < stream->stats.stream.min_sof) stream->stats.stream.min_sof = scr_sof; if (scr_sof > stream->stats.stream.max_sof) stream->stats.stream.max_sof = scr_sof; } /* Record the first non-empty packet number. */ if (stream->stats.frame.size == 0 && len > header_size) stream->stats.frame.first_data = stream->stats.frame.nb_packets; /* Update the frame size. */ stream->stats.frame.size += len - header_size; /* Update the packets counters. */ stream->stats.frame.nb_packets++; if (len <= header_size) stream->stats.frame.nb_empty++; if (data[1] & UVC_STREAM_ERR) stream->stats.frame.nb_errors++; } static void uvc_video_stats_update(struct uvc_streaming *stream) { struct uvc_stats_frame *frame = &stream->stats.frame; uvc_dbg(stream->dev, STATS, "frame %u stats: %u/%u/%u packets, %u/%u/%u pts (%searly %sinitial), %u/%u scr, last pts/stc/sof %u/%u/%u\n", stream->sequence, frame->first_data, frame->nb_packets - frame->nb_empty, frame->nb_packets, frame->nb_pts_diffs, frame->last_pts_diff, frame->nb_pts, frame->has_early_pts ? "" : "!", frame->has_initial_pts ? "" : "!", frame->nb_scr_diffs, frame->nb_scr, frame->pts, frame->scr_stc, frame->scr_sof); stream->stats.stream.nb_frames++; stream->stats.stream.nb_packets += stream->stats.frame.nb_packets; stream->stats.stream.nb_empty += stream->stats.frame.nb_empty; stream->stats.stream.nb_errors += stream->stats.frame.nb_errors; stream->stats.stream.nb_invalid += stream->stats.frame.nb_invalid; if (frame->has_early_pts) stream->stats.stream.nb_pts_early++; if (frame->has_initial_pts) stream->stats.stream.nb_pts_initial++; if (frame->last_pts_diff <= frame->first_data) stream->stats.stream.nb_pts_constant++; if (frame->nb_scr >= frame->nb_packets - frame->nb_empty) stream->stats.stream.nb_scr_count_ok++; if (frame->nb_scr_diffs + 1 == frame->nb_scr) stream->stats.stream.nb_scr_diffs_ok++; memset(&stream->stats.frame, 0, sizeof(stream->stats.frame)); } size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf, size_t size) { unsigned int scr_sof_freq; unsigned int duration; size_t count = 0; /* * Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF * frequency this will not overflow before more than 1h. 
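 *
 * Back-of-the-envelope check (order of magnitude only): the code below
 * computes scr_sof_count * 1000 in 32 bits, and with SOF ticking at roughly
 * 1 kHz scr_sof_count grows by about 3.6 million per hour, so the product
 * reaches 2^32 only after about 4294967296 / (1000 * 1000) s ~= 4295 s,
 * i.e. a bit more than an hour of continuous streaming.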
*/ duration = ktime_ms_delta(stream->stats.stream.stop_ts, stream->stats.stream.start_ts); if (duration != 0) scr_sof_freq = stream->stats.stream.scr_sof_count * 1000 / duration; else scr_sof_freq = 0; count += scnprintf(buf + count, size - count, "frames: %u\npackets: %u\nempty: %u\n" "errors: %u\ninvalid: %u\n", stream->stats.stream.nb_frames, stream->stats.stream.nb_packets, stream->stats.stream.nb_empty, stream->stats.stream.nb_errors, stream->stats.stream.nb_invalid); count += scnprintf(buf + count, size - count, "pts: %u early, %u initial, %u ok\n", stream->stats.stream.nb_pts_early, stream->stats.stream.nb_pts_initial, stream->stats.stream.nb_pts_constant); count += scnprintf(buf + count, size - count, "scr: %u count ok, %u diff ok\n", stream->stats.stream.nb_scr_count_ok, stream->stats.stream.nb_scr_diffs_ok); count += scnprintf(buf + count, size - count, "sof: %u <= sof <= %u, freq %u.%03u kHz\n", stream->stats.stream.min_sof, stream->stats.stream.max_sof, scr_sof_freq / 1000, scr_sof_freq % 1000); return count; } static void uvc_video_stats_start(struct uvc_streaming *stream) { memset(&stream->stats, 0, sizeof(stream->stats)); stream->stats.stream.min_sof = 2048; } static void uvc_video_stats_stop(struct uvc_streaming *stream) { stream->stats.stream.stop_ts = ktime_get(); } /* ------------------------------------------------------------------------ * Video codecs */ /* * Video payload decoding is handled by uvc_video_decode_start(), * uvc_video_decode_data() and uvc_video_decode_end(). * * uvc_video_decode_start is called with URB data at the start of a bulk or * isochronous payload. It processes header data and returns the header size * in bytes if successful. If an error occurs, it returns a negative error * code. The following error codes have special meanings. * * - EAGAIN informs the caller that the current video buffer should be marked * as done, and that the function should be called again with the same data * and a new video buffer. This is used when end of frame conditions can be * reliably detected at the beginning of the next frame only. * * If an error other than -EAGAIN is returned, the caller will drop the current * payload. No call to uvc_video_decode_data and uvc_video_decode_end will be * made until the next payload. -ENODATA can be used to drop the current * payload if no other error code is appropriate. * * uvc_video_decode_data is called for every URB with URB data. It copies the * data to the video buffer. * * uvc_video_decode_end is called with header data at the end of a bulk or * isochronous payload. It performs any additional header data processing and * returns 0 or a negative error code if an error occurred. As header data have * already been processed by uvc_video_decode_start, this functions isn't * required to perform sanity checks a second time. * * For isochronous transfers where a payload is always transferred in a single * URB, the three functions will be called in a row. * * To let the decoder process header data and update its internal state even * when no video buffer is available, uvc_video_decode_start must be prepared * to be called with a NULL buf parameter. uvc_video_decode_data and * uvc_video_decode_end will never be called with a NULL buffer. 
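 *
 * A minimal sketch of the calling sequence for a single isochronous packet
 * (illustrative only - the in-tree callers also handle bulk payloads and
 * buffer queue management, which is omitted here):
 *
 *	do {
 *		ret = uvc_video_decode_start(stream, buf, data, len);
 *		if (ret == -EAGAIN)
 *			(complete buf and make the next queued buffer current)
 *	} while (ret == -EAGAIN);
 *
 *	if (ret < 0)
 *		(drop this payload)
 *	else {
 *		uvc_video_decode_data(uvc_urb, buf, data + ret, len - ret);
 *		uvc_video_decode_end(stream, buf, data, len);
 *	}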
*/ static int uvc_video_decode_start(struct uvc_streaming *stream, struct uvc_buffer *buf, const u8 *data, int len) { u8 header_len; u8 fid; /* * Sanity checks: * - packet must be at least 2 bytes long * - bHeaderLength value must be at least 2 bytes (see above) * - bHeaderLength value can't be larger than the packet size. */ if (len < 2 || data[0] < 2 || data[0] > len) { stream->stats.frame.nb_invalid++; return -EINVAL; } header_len = data[0]; fid = data[1] & UVC_STREAM_FID; /* * Increase the sequence number regardless of any buffer states, so * that discontinuous sequence numbers always indicate lost frames. */ if (stream->last_fid != fid) { stream->sequence++; if (stream->sequence) uvc_video_stats_update(stream); } uvc_video_clock_decode(stream, buf, data, len); uvc_video_stats_decode(stream, data, len); /* * Store the payload FID bit and return immediately when the buffer is * NULL. */ if (buf == NULL) { stream->last_fid = fid; return -ENODATA; } /* Mark the buffer as bad if the error bit is set. */ if (data[1] & UVC_STREAM_ERR) { uvc_dbg(stream->dev, FRAME, "Marking buffer as bad (error bit set)\n"); buf->error = 1; } /* * Synchronize to the input stream by waiting for the FID bit to be * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE. * stream->last_fid is initialized to -1, so the first isochronous * frame will always be in sync. * * If the device doesn't toggle the FID bit, invert stream->last_fid * when the EOF bit is set to force synchronisation on the next packet. */ if (buf->state != UVC_BUF_STATE_ACTIVE) { if (fid == stream->last_fid) { uvc_dbg(stream->dev, FRAME, "Dropping payload (out of sync)\n"); if ((stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) && (data[1] & UVC_STREAM_EOF)) stream->last_fid ^= UVC_STREAM_FID; return -ENODATA; } buf->buf.field = V4L2_FIELD_NONE; buf->buf.sequence = stream->sequence; buf->buf.vb2_buf.timestamp = ktime_to_ns(uvc_video_get_time()); /* TODO: Handle PTS and SCR. */ buf->state = UVC_BUF_STATE_ACTIVE; } /* * Mark the buffer as done if we're at the beginning of a new frame. * End of frame detection is better implemented by checking the EOF * bit (FID bit toggling is delayed by one frame compared to the EOF * bit), but some devices don't set the bit at end of frame (and the * last payload can be lost anyway). We thus must check if the FID has * been toggled. * * stream->last_fid is initialized to -1, so the first isochronous * frame will never trigger an end of frame detection. * * Empty buffers (bytesused == 0) don't trigger end of frame detection * as it doesn't make sense to return an empty buffer. This also * avoids detecting end of frame conditions at FID toggling if the * previous payload had the EOF bit set. */ if (fid != stream->last_fid && buf->bytesused != 0) { uvc_dbg(stream->dev, FRAME, "Frame complete (FID bit toggled)\n"); buf->state = UVC_BUF_STATE_READY; return -EAGAIN; } /* * Some cameras, when running two parallel streams (one MJPEG alongside * another non-MJPEG stream), are known to lose the EOF packet for a frame. * We can detect the end of a frame by checking for a new SOI marker, as * the SOI always lies on the packet boundary between two frames for * these devices. 
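 * The SOI marker is the two-byte sequence 0xff 0xd8 at the very start of a
 * JPEG image, so the check below only needs to inspect the first two bytes
 * of the payload data that follows the header.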
*/ if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF && (stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG || stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) { const u8 *packet = data + header_len; if (len >= header_len + 2 && packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI && buf->bytesused != 0) { buf->state = UVC_BUF_STATE_READY; buf->error = 1; stream->last_fid ^= UVC_STREAM_FID; return -EAGAIN; } } stream->last_fid = fid; return header_len; } static inline enum dma_data_direction uvc_stream_dir( struct uvc_streaming *stream) { if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return DMA_FROM_DEVICE; else return DMA_TO_DEVICE; } static inline struct device *uvc_stream_to_dmadev(struct uvc_streaming *stream) { return bus_to_hcd(stream->dev->udev->bus)->self.sysdev; } static int uvc_submit_urb(struct uvc_urb *uvc_urb, gfp_t mem_flags) { /* Sync DMA. */ dma_sync_sgtable_for_device(uvc_stream_to_dmadev(uvc_urb->stream), uvc_urb->sgt, uvc_stream_dir(uvc_urb->stream)); return usb_submit_urb(uvc_urb->urb, mem_flags); } /* * uvc_video_decode_data_work: Asynchronous memcpy processing * * Copy URB data to video buffers in process context, releasing buffer * references and requeuing the URB when done. */ static void uvc_video_copy_data_work(struct work_struct *work) { struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work); unsigned int i; int ret; for (i = 0; i < uvc_urb->async_operations; i++) { struct uvc_copy_op *op = &uvc_urb->copy_operations[i]; memcpy(op->dst, op->src, op->len); /* Release reference taken on this buffer. */ uvc_queue_buffer_release(op->buf); } ret = uvc_submit_urb(uvc_urb, GFP_KERNEL); if (ret < 0) dev_err(&uvc_urb->stream->intf->dev, "Failed to resubmit video URB (%d).\n", ret); } static void uvc_video_decode_data(struct uvc_urb *uvc_urb, struct uvc_buffer *buf, const u8 *data, int len) { unsigned int active_op = uvc_urb->async_operations; struct uvc_copy_op *op = &uvc_urb->copy_operations[active_op]; unsigned int maxlen; if (len <= 0) return; maxlen = buf->length - buf->bytesused; /* Take a buffer reference for async work. */ kref_get(&buf->ref); op->buf = buf; op->src = data; op->dst = buf->mem + buf->bytesused; op->len = min_t(unsigned int, len, maxlen); buf->bytesused += op->len; /* Complete the current frame if the buffer size was exceeded. */ if (len > maxlen) { uvc_dbg(uvc_urb->stream->dev, FRAME, "Frame complete (overflow)\n"); buf->error = 1; buf->state = UVC_BUF_STATE_READY; } uvc_urb->async_operations++; } static void uvc_video_decode_end(struct uvc_streaming *stream, struct uvc_buffer *buf, const u8 *data, int len) { /* Mark the buffer as done if the EOF marker is set. */ if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) { uvc_dbg(stream->dev, FRAME, "Frame complete (EOF found)\n"); if (data[0] == len) uvc_dbg(stream->dev, FRAME, "EOF in empty payload\n"); buf->state = UVC_BUF_STATE_READY; if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) stream->last_fid ^= UVC_STREAM_FID; } } /* * Video payload encoding is handled by uvc_video_encode_header() and * uvc_video_encode_data(). Only bulk transfers are currently supported. * * uvc_video_encode_header is called at the start of a payload. It adds header * data to the transfer buffer and returns the header size. As the only known * UVC output device transfers a whole frame in a single payload, the EOF bit * is always set in the header. * * uvc_video_encode_data is called for every URB and copies the data from the * video buffer to the transfer buffer. 
*/ static int uvc_video_encode_header(struct uvc_streaming *stream, struct uvc_buffer *buf, u8 *data, int len) { data[0] = 2; /* Header length */ data[1] = UVC_STREAM_EOH | UVC_STREAM_EOF | (stream->last_fid & UVC_STREAM_FID); return 2; } static int uvc_video_encode_data(struct uvc_streaming *stream, struct uvc_buffer *buf, u8 *data, int len) { struct uvc_video_queue *queue = &stream->queue; unsigned int nbytes; void *mem; /* Copy video data to the URB buffer. */ mem = buf->mem + queue->buf_used; nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used); nbytes = min(stream->bulk.max_payload_size - stream->bulk.payload_size, nbytes); memcpy(data, mem, nbytes); queue->buf_used += nbytes; return nbytes; } /* ------------------------------------------------------------------------ * Metadata */ /* * Additionally to the payload headers we also want to provide the user with USB * Frame Numbers and system time values. The resulting buffer is thus composed * of blocks, containing a 64-bit timestamp in nanoseconds, a 16-bit USB Frame * Number, and a copy of the payload header. * * Ideally we want to capture all payload headers for each frame. However, their * number is unknown and unbound. We thus drop headers that contain no vendor * data and that either contain no SCR value or an SCR value identical to the * previous header. */ static void uvc_video_decode_meta(struct uvc_streaming *stream, struct uvc_buffer *meta_buf, const u8 *mem, unsigned int length) { struct uvc_meta_buf *meta; size_t len_std = 2; bool has_pts, has_scr; unsigned long flags; unsigned int sof; ktime_t time; const u8 *scr; if (!meta_buf || length == 2) return; if (meta_buf->length - meta_buf->bytesused < length + sizeof(meta->ns) + sizeof(meta->sof)) { meta_buf->error = 1; return; } has_pts = mem[1] & UVC_STREAM_PTS; has_scr = mem[1] & UVC_STREAM_SCR; if (has_pts) { len_std += 4; scr = mem + 6; } else { scr = mem + 2; } if (has_scr) len_std += 6; if (stream->meta.format == V4L2_META_FMT_UVC) length = len_std; if (length == len_std && (!has_scr || !memcmp(scr, stream->clock.last_scr, 6))) return; meta = (struct uvc_meta_buf *)((u8 *)meta_buf->mem + meta_buf->bytesused); local_irq_save(flags); time = uvc_video_get_time(); sof = usb_get_current_frame_number(stream->dev->udev); local_irq_restore(flags); put_unaligned(ktime_to_ns(time), &meta->ns); put_unaligned(sof, &meta->sof); if (has_scr) memcpy(stream->clock.last_scr, scr, 6); meta->length = mem[0]; meta->flags = mem[1]; memcpy(meta->buf, &mem[2], length - 2); meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof); uvc_dbg(stream->dev, FRAME, "%s(): t-sys %lluns, SOF %u, len %u, flags 0x%x, PTS %u, STC %u frame SOF %u\n", __func__, ktime_to_ns(time), meta->sof, meta->length, meta->flags, has_pts ? *(u32 *)meta->buf : 0, has_scr ? *(u32 *)scr : 0, has_scr ? *(u32 *)(scr + 4) & 0x7ff : 0); } /* ------------------------------------------------------------------------ * URB handling */ /* * Set error flag for incomplete buffer. */ static void uvc_video_validate_buffer(const struct uvc_streaming *stream, struct uvc_buffer *buf) { if (stream->ctrl.dwMaxVideoFrameSize != buf->bytesused && !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED)) buf->error = 1; } /* * Completion handler for video URBs. 
*/ static void uvc_video_next_buffers(struct uvc_streaming *stream, struct uvc_buffer **video_buf, struct uvc_buffer **meta_buf) { uvc_video_validate_buffer(stream, *video_buf); if (*meta_buf) { struct vb2_v4l2_buffer *vb2_meta = &(*meta_buf)->buf; const struct vb2_v4l2_buffer *vb2_video = &(*video_buf)->buf; vb2_meta->sequence = vb2_video->sequence; vb2_meta->field = vb2_video->field; vb2_meta->vb2_buf.timestamp = vb2_video->vb2_buf.timestamp; (*meta_buf)->state = UVC_BUF_STATE_READY; if (!(*meta_buf)->error) (*meta_buf)->error = (*video_buf)->error; *meta_buf = uvc_queue_next_buffer(&stream->meta.queue, *meta_buf); } *video_buf = uvc_queue_next_buffer(&stream->queue, *video_buf); } static void uvc_video_decode_isoc(struct uvc_urb *uvc_urb, struct uvc_buffer *buf, struct uvc_buffer *meta_buf) { struct urb *urb = uvc_urb->urb; struct uvc_streaming *stream = uvc_urb->stream; u8 *mem; int ret, i; for (i = 0; i < urb->number_of_packets; ++i) { if (urb->iso_frame_desc[i].status < 0) { uvc_dbg(stream->dev, FRAME, "USB isochronous frame lost (%d)\n", urb->iso_frame_desc[i].status); /* Mark the buffer as faulty. */ if (buf != NULL) buf->error = 1; continue; } /* Decode the payload header. */ mem = urb->transfer_buffer + urb->iso_frame_desc[i].offset; do { ret = uvc_video_decode_start(stream, buf, mem, urb->iso_frame_desc[i].actual_length); if (ret == -EAGAIN) uvc_video_next_buffers(stream, &buf, &meta_buf); } while (ret == -EAGAIN); if (ret < 0) continue; uvc_video_decode_meta(stream, meta_buf, mem, ret); /* Decode the payload data. */ uvc_video_decode_data(uvc_urb, buf, mem + ret, urb->iso_frame_desc[i].actual_length - ret); /* Process the header again. */ uvc_video_decode_end(stream, buf, mem, urb->iso_frame_desc[i].actual_length); if (buf->state == UVC_BUF_STATE_READY) uvc_video_next_buffers(stream, &buf, &meta_buf); } } static void uvc_video_decode_bulk(struct uvc_urb *uvc_urb, struct uvc_buffer *buf, struct uvc_buffer *meta_buf) { struct urb *urb = uvc_urb->urb; struct uvc_streaming *stream = uvc_urb->stream; u8 *mem; int len, ret; /* * Ignore ZLPs if they're not part of a frame, otherwise process them * to trigger the end of payload detection. */ if (urb->actual_length == 0 && stream->bulk.header_size == 0) return; mem = urb->transfer_buffer; len = urb->actual_length; stream->bulk.payload_size += len; /* * If the URB is the first of its payload, decode and save the * header. */ if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) { do { ret = uvc_video_decode_start(stream, buf, mem, len); if (ret == -EAGAIN) uvc_video_next_buffers(stream, &buf, &meta_buf); } while (ret == -EAGAIN); /* If an error occurred skip the rest of the payload. */ if (ret < 0 || buf == NULL) { stream->bulk.skip_payload = 1; } else { memcpy(stream->bulk.header, mem, ret); stream->bulk.header_size = ret; uvc_video_decode_meta(stream, meta_buf, mem, ret); mem += ret; len -= ret; } } /* * The buffer queue might have been cancelled while a bulk transfer * was in progress, so we can reach here with buf equal to NULL. Make * sure buf is never dereferenced if NULL. */ /* Prepare video data for processing. */ if (!stream->bulk.skip_payload && buf != NULL) uvc_video_decode_data(uvc_urb, buf, mem, len); /* * Detect the payload end by a URB smaller than the maximum size (or * a payload size equal to the maximum) and process the header again. 
*/ if (urb->actual_length < urb->transfer_buffer_length || stream->bulk.payload_size >= stream->bulk.max_payload_size) { if (!stream->bulk.skip_payload && buf != NULL) { uvc_video_decode_end(stream, buf, stream->bulk.header, stream->bulk.payload_size); if (buf->state == UVC_BUF_STATE_READY) uvc_video_next_buffers(stream, &buf, &meta_buf); } stream->bulk.header_size = 0; stream->bulk.skip_payload = 0; stream->bulk.payload_size = 0; } } static void uvc_video_encode_bulk(struct uvc_urb *uvc_urb, struct uvc_buffer *buf, struct uvc_buffer *meta_buf) { struct urb *urb = uvc_urb->urb; struct uvc_streaming *stream = uvc_urb->stream; u8 *mem = urb->transfer_buffer; int len = stream->urb_size, ret; if (buf == NULL) { urb->transfer_buffer_length = 0; return; } /* If the URB is the first of its payload, add the header. */ if (stream->bulk.header_size == 0) { ret = uvc_video_encode_header(stream, buf, mem, len); stream->bulk.header_size = ret; stream->bulk.payload_size += ret; mem += ret; len -= ret; } /* Process video data. */ ret = uvc_video_encode_data(stream, buf, mem, len); stream->bulk.payload_size += ret; len -= ret; if (buf->bytesused == stream->queue.buf_used || stream->bulk.payload_size == stream->bulk.max_payload_size) { if (buf->bytesused == stream->queue.buf_used) { stream->queue.buf_used = 0; buf->state = UVC_BUF_STATE_READY; buf->buf.sequence = ++stream->sequence; uvc_queue_next_buffer(&stream->queue, buf); stream->last_fid ^= UVC_STREAM_FID; } stream->bulk.header_size = 0; stream->bulk.payload_size = 0; } urb->transfer_buffer_length = stream->urb_size - len; } static void uvc_video_complete(struct urb *urb) { struct uvc_urb *uvc_urb = urb->context; struct uvc_streaming *stream = uvc_urb->stream; struct uvc_video_queue *queue = &stream->queue; struct uvc_video_queue *qmeta = &stream->meta.queue; struct vb2_queue *vb2_qmeta = stream->meta.vdev.queue; struct uvc_buffer *buf = NULL; struct uvc_buffer *buf_meta = NULL; unsigned long flags; int ret; switch (urb->status) { case 0: break; default: dev_warn(&stream->intf->dev, "Non-zero status (%d) in video completion handler.\n", urb->status); fallthrough; case -ENOENT: /* usb_poison_urb() called. */ if (stream->frozen) return; fallthrough; case -ECONNRESET: /* usb_unlink_urb() called. */ case -ESHUTDOWN: /* The endpoint is being disabled. */ uvc_queue_cancel(queue, urb->status == -ESHUTDOWN); if (vb2_qmeta) uvc_queue_cancel(qmeta, urb->status == -ESHUTDOWN); return; } buf = uvc_queue_get_current_buffer(queue); if (vb2_qmeta) { spin_lock_irqsave(&qmeta->irqlock, flags); if (!list_empty(&qmeta->irqqueue)) buf_meta = list_first_entry(&qmeta->irqqueue, struct uvc_buffer, queue); spin_unlock_irqrestore(&qmeta->irqlock, flags); } /* Re-initialise the URB async work. */ uvc_urb->async_operations = 0; /* Sync DMA and invalidate vmap range. */ dma_sync_sgtable_for_cpu(uvc_stream_to_dmadev(uvc_urb->stream), uvc_urb->sgt, uvc_stream_dir(stream)); invalidate_kernel_vmap_range(uvc_urb->buffer, uvc_urb->stream->urb_size); /* * Process the URB headers, and optionally queue expensive memcpy tasks * to be deferred to a work queue. */ stream->decode(uvc_urb, buf, buf_meta); /* If no async work is needed, resubmit the URB immediately. */ if (!uvc_urb->async_operations) { ret = uvc_submit_urb(uvc_urb, GFP_ATOMIC); if (ret < 0) dev_err(&stream->intf->dev, "Failed to resubmit video URB (%d).\n", ret); return; } queue_work(stream->async_wq, &uvc_urb->work); } /* * Free transfer buffers. 
*/ static void uvc_free_urb_buffers(struct uvc_streaming *stream) { struct device *dma_dev = uvc_stream_to_dmadev(stream); struct uvc_urb *uvc_urb; for_each_uvc_urb(uvc_urb, stream) { if (!uvc_urb->buffer) continue; dma_vunmap_noncontiguous(dma_dev, uvc_urb->buffer); dma_free_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt, uvc_stream_dir(stream)); uvc_urb->buffer = NULL; uvc_urb->sgt = NULL; } stream->urb_size = 0; } static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream, struct uvc_urb *uvc_urb, gfp_t gfp_flags) { struct device *dma_dev = uvc_stream_to_dmadev(stream); uvc_urb->sgt = dma_alloc_noncontiguous(dma_dev, stream->urb_size, uvc_stream_dir(stream), gfp_flags, 0); if (!uvc_urb->sgt) return false; uvc_urb->dma = uvc_urb->sgt->sgl->dma_address; uvc_urb->buffer = dma_vmap_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt); if (!uvc_urb->buffer) { dma_free_noncontiguous(dma_dev, stream->urb_size, uvc_urb->sgt, uvc_stream_dir(stream)); uvc_urb->sgt = NULL; return false; } return true; } /* * Allocate transfer buffers. This function can be called with buffers * already allocated when resuming from suspend, in which case it will * return without touching the buffers. * * Limit the buffer size to UVC_MAX_PACKETS bulk/isochronous packets. If the * system is too low on memory try successively smaller numbers of packets * until allocation succeeds. * * Return the number of allocated packets on success or 0 when out of memory. */ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream, unsigned int size, unsigned int psize, gfp_t gfp_flags) { unsigned int npackets; unsigned int i; /* Buffers are already allocated, bail out. */ if (stream->urb_size) return stream->urb_size / psize; /* * Compute the number of packets. Bulk endpoints might transfer UVC * payloads across multiple URBs. */ npackets = DIV_ROUND_UP(size, psize); if (npackets > UVC_MAX_PACKETS) npackets = UVC_MAX_PACKETS; /* Retry allocations until one succeed. */ for (; npackets > 1; npackets /= 2) { stream->urb_size = psize * npackets; for (i = 0; i < UVC_URBS; ++i) { struct uvc_urb *uvc_urb = &stream->uvc_urb[i]; if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) { uvc_free_urb_buffers(stream); break; } uvc_urb->stream = stream; } if (i == UVC_URBS) { uvc_dbg(stream->dev, VIDEO, "Allocated %u URB buffers of %ux%u bytes each\n", UVC_URBS, npackets, psize); return npackets; } } uvc_dbg(stream->dev, VIDEO, "Failed to allocate URB buffers (%u bytes per packet)\n", psize); return 0; } /* * Uninitialize isochronous/bulk URBs and free transfer buffers. */ static void uvc_video_stop_transfer(struct uvc_streaming *stream, int free_buffers) { struct uvc_urb *uvc_urb; uvc_video_stats_stop(stream); /* * We must poison the URBs rather than kill them to ensure that even * after the completion handler returns, any asynchronous workqueues * will be prevented from resubmitting the URBs. */ for_each_uvc_urb(uvc_urb, stream) usb_poison_urb(uvc_urb->urb); flush_workqueue(stream->async_wq); for_each_uvc_urb(uvc_urb, stream) { usb_free_urb(uvc_urb->urb); uvc_urb->urb = NULL; } if (free_buffers) uvc_free_urb_buffers(stream); } /* * Compute the maximum number of bytes per interval for an endpoint. 
*/ u16 uvc_endpoint_max_bpi(struct usb_device *dev, struct usb_host_endpoint *ep) { u16 psize; switch (dev->speed) { case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS: return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); default: psize = usb_endpoint_maxp(&ep->desc); psize *= usb_endpoint_maxp_mult(&ep->desc); return psize; } } /* * Initialize isochronous URBs and allocate transfer buffers. The packet size * is given by the endpoint. */ static int uvc_init_video_isoc(struct uvc_streaming *stream, struct usb_host_endpoint *ep, gfp_t gfp_flags) { struct urb *urb; struct uvc_urb *uvc_urb; unsigned int npackets, i; u16 psize; u32 size; psize = uvc_endpoint_max_bpi(stream->dev->udev, ep); size = stream->ctrl.dwMaxVideoFrameSize; npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags); if (npackets == 0) return -ENOMEM; size = npackets * psize; for_each_uvc_urb(uvc_urb, stream) { urb = usb_alloc_urb(npackets, gfp_flags); if (urb == NULL) { uvc_video_stop_transfer(stream, 1); return -ENOMEM; } urb->dev = stream->dev->udev; urb->context = uvc_urb; urb->pipe = usb_rcvisocpipe(stream->dev->udev, ep->desc.bEndpointAddress); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = uvc_urb->dma; urb->interval = ep->desc.bInterval; urb->transfer_buffer = uvc_urb->buffer; urb->complete = uvc_video_complete; urb->number_of_packets = npackets; urb->transfer_buffer_length = size; for (i = 0; i < npackets; ++i) { urb->iso_frame_desc[i].offset = i * psize; urb->iso_frame_desc[i].length = psize; } uvc_urb->urb = urb; } return 0; } /* * Initialize bulk URBs and allocate transfer buffers. The packet size is * given by the endpoint. */ static int uvc_init_video_bulk(struct uvc_streaming *stream, struct usb_host_endpoint *ep, gfp_t gfp_flags) { struct urb *urb; struct uvc_urb *uvc_urb; unsigned int npackets, pipe; u16 psize; u32 size; psize = usb_endpoint_maxp(&ep->desc); size = stream->ctrl.dwMaxPayloadTransferSize; stream->bulk.max_payload_size = size; npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags); if (npackets == 0) return -ENOMEM; size = npackets * psize; if (usb_endpoint_dir_in(&ep->desc)) pipe = usb_rcvbulkpipe(stream->dev->udev, ep->desc.bEndpointAddress); else pipe = usb_sndbulkpipe(stream->dev->udev, ep->desc.bEndpointAddress); if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) size = 0; for_each_uvc_urb(uvc_urb, stream) { urb = usb_alloc_urb(0, gfp_flags); if (urb == NULL) { uvc_video_stop_transfer(stream, 1); return -ENOMEM; } usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer, size, uvc_video_complete, uvc_urb); urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = uvc_urb->dma; uvc_urb->urb = urb; } return 0; } /* * Initialize isochronous/bulk URBs and allocate transfer buffers. */ static int uvc_video_start_transfer(struct uvc_streaming *stream, gfp_t gfp_flags) { struct usb_interface *intf = stream->intf; struct usb_host_endpoint *ep; struct uvc_urb *uvc_urb; unsigned int i; int ret; stream->sequence = -1; stream->last_fid = -1; stream->bulk.header_size = 0; stream->bulk.skip_payload = 0; stream->bulk.payload_size = 0; uvc_video_stats_start(stream); if (intf->num_altsetting > 1) { struct usb_host_endpoint *best_ep = NULL; unsigned int best_psize = UINT_MAX; unsigned int bandwidth; unsigned int altsetting; int intfnum = stream->intfnum; /* Isochronous endpoint, select the alternate setting. 
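		 * The loop below walks every alternate setting of the
		 * streaming interface and keeps the one with the smallest
		 * per-interval payload that still satisfies the bandwidth
		 * requested by the device, so no more isochronous bandwidth
		 * is reserved than necessary.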
*/ bandwidth = stream->ctrl.dwMaxPayloadTransferSize; if (bandwidth == 0) { uvc_dbg(stream->dev, VIDEO, "Device requested null bandwidth, defaulting to lowest\n"); bandwidth = 1; } else { uvc_dbg(stream->dev, VIDEO, "Device requested %u B/frame bandwidth\n", bandwidth); } for (i = 0; i < intf->num_altsetting; ++i) { struct usb_host_interface *alts; unsigned int psize; alts = &intf->altsetting[i]; ep = uvc_find_endpoint(alts, stream->header.bEndpointAddress); if (ep == NULL) continue; /* Check if the bandwidth is high enough. */ psize = uvc_endpoint_max_bpi(stream->dev->udev, ep); if (psize >= bandwidth && psize < best_psize) { altsetting = alts->desc.bAlternateSetting; best_psize = psize; best_ep = ep; } } if (best_ep == NULL) { uvc_dbg(stream->dev, VIDEO, "No fast enough alt setting for requested bandwidth\n"); return -EIO; } uvc_dbg(stream->dev, VIDEO, "Selecting alternate setting %u (%u B/frame bandwidth)\n", altsetting, best_psize); /* * Some devices, namely the Logitech C910 and B910, are unable * to recover from a USB autosuspend, unless the alternate * setting of the streaming interface is toggled. */ if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) { usb_set_interface(stream->dev->udev, intfnum, altsetting); usb_set_interface(stream->dev->udev, intfnum, 0); } ret = usb_set_interface(stream->dev->udev, intfnum, altsetting); if (ret < 0) return ret; ret = uvc_init_video_isoc(stream, best_ep, gfp_flags); } else { /* Bulk endpoint, proceed to URB initialization. */ ep = uvc_find_endpoint(&intf->altsetting[0], stream->header.bEndpointAddress); if (ep == NULL) return -EIO; /* Reject broken descriptors. */ if (usb_endpoint_maxp(&ep->desc) == 0) return -EIO; ret = uvc_init_video_bulk(stream, ep, gfp_flags); } if (ret < 0) return ret; /* Submit the URBs. */ for_each_uvc_urb(uvc_urb, stream) { ret = uvc_submit_urb(uvc_urb, gfp_flags); if (ret < 0) { dev_err(&stream->intf->dev, "Failed to submit URB %u (%d).\n", uvc_urb_index(uvc_urb), ret); uvc_video_stop_transfer(stream, 1); return ret; } } /* * The Logitech C920 temporarily forgets that it should not be adjusting * Exposure Absolute during init so restore controls to stored values. */ if (stream->dev->quirks & UVC_QUIRK_RESTORE_CTRLS_ON_INIT) uvc_ctrl_restore_values(stream->dev); return 0; } /* -------------------------------------------------------------------------- * Suspend/resume */ /* * Stop streaming without disabling the video queue. * * To let userspace applications resume without trouble, we must not touch the * video buffers in any way. We mark the device as frozen to make sure the URB * completion handler won't try to cancel the queue when we kill the URBs. */ int uvc_video_suspend(struct uvc_streaming *stream) { if (!uvc_queue_streaming(&stream->queue)) return 0; stream->frozen = 1; uvc_video_stop_transfer(stream, 0); usb_set_interface(stream->dev->udev, stream->intfnum, 0); return 0; } /* * Reconfigure the video interface and restart streaming if it was enabled * before suspend. * * If an error occurs, disable the video queue. This will wake all pending * buffers, making sure userspace applications are notified of the problem * instead of waiting forever. */ int uvc_video_resume(struct uvc_streaming *stream, int reset) { int ret; /* * If the bus has been reset on resume, set the alternate setting to 0. * This should be the default value, but some devices crash or otherwise * misbehave if they don't receive a SET_INTERFACE request before any * other video control request. 
*/ if (reset) usb_set_interface(stream->dev->udev, stream->intfnum, 0); stream->frozen = 0; uvc_video_clock_reset(&stream->clock); if (!uvc_queue_streaming(&stream->queue)) return 0; ret = uvc_commit_video(stream, &stream->ctrl); if (ret < 0) return ret; return uvc_video_start_transfer(stream, GFP_NOIO); } /* ------------------------------------------------------------------------ * Video device */ /* * Initialize the UVC video device by switching to alternate setting 0 and * retrieve the default format. * * Some cameras (namely the Fuji Finepix) set the format and frame * indexes to zero. The UVC standard doesn't clearly make this a spec * violation, so try to silently fix the values if possible. * * This function is called before registering the device with V4L. */ int uvc_video_init(struct uvc_streaming *stream) { struct uvc_streaming_control *probe = &stream->ctrl; const struct uvc_format *format = NULL; const struct uvc_frame *frame = NULL; struct uvc_urb *uvc_urb; unsigned int i; int ret; if (stream->nformats == 0) { dev_info(&stream->intf->dev, "No supported video formats found.\n"); return -EINVAL; } atomic_set(&stream->active, 0); /* * Alternate setting 0 should be the default, yet the XBox Live Vision * Cam (and possibly other devices) crash or otherwise misbehave if * they don't receive a SET_INTERFACE request before any other video * control request. */ usb_set_interface(stream->dev->udev, stream->intfnum, 0); /* * Set the streaming probe control with default streaming parameters * retrieved from the device. Webcams that don't support GET_DEF * requests on the probe control will just keep their current streaming * parameters. */ if (uvc_get_video_ctrl(stream, probe, 1, UVC_GET_DEF) == 0) uvc_set_video_ctrl(stream, probe, 1); /* * Initialize the streaming parameters with the probe control current * value. This makes sure SET_CUR requests on the streaming commit * control will always use values retrieved from a successful GET_CUR * request on the probe control, as required by the UVC specification. */ ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR); /* * Elgato Cam Link 4k can be in a stalled state if the resolution of * the external source has changed while the firmware initializes. * Once in this state, the device is useless until it receives a * USB reset. It has even been observed that the stalled state will * continue even after unplugging the device. */ if (ret == -EPROTO && usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k)) { dev_err(&stream->intf->dev, "Elgato Cam Link 4K firmware crash detected\n"); dev_err(&stream->intf->dev, "Resetting the device, unplug and replug to recover\n"); usb_reset_device(stream->dev->udev); } if (ret < 0) return ret; /* * Check if the default format descriptor exists. Use the first * available format otherwise. */ for (i = stream->nformats; i > 0; --i) { format = &stream->formats[i-1]; if (format->index == probe->bFormatIndex) break; } if (format->nframes == 0) { dev_info(&stream->intf->dev, "No frame descriptor found for the default format.\n"); return -EINVAL; } /* * Zero bFrameIndex might be correct. Stream-based formats (including * MPEG-2 TS and DV) do not support frames but have a dummy frame * descriptor with bFrameIndex set to zero. If the default frame * descriptor is not found, use the first available frame. 
*/ for (i = format->nframes; i > 0; --i) { frame = &format->frames[i-1]; if (frame->bFrameIndex == probe->bFrameIndex) break; } probe->bFormatIndex = format->index; probe->bFrameIndex = frame->bFrameIndex; stream->def_format = format; stream->cur_format = format; stream->cur_frame = frame; /* Select the video decoding function */ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (stream->dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT) stream->decode = uvc_video_decode_isight; else if (stream->intf->num_altsetting > 1) stream->decode = uvc_video_decode_isoc; else stream->decode = uvc_video_decode_bulk; } else { if (stream->intf->num_altsetting == 1) stream->decode = uvc_video_encode_bulk; else { dev_info(&stream->intf->dev, "Isochronous endpoints are not supported for video output devices.\n"); return -EINVAL; } } /* Prepare asynchronous work items. */ for_each_uvc_urb(uvc_urb, stream) INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work); return 0; } int uvc_video_start_streaming(struct uvc_streaming *stream) { int ret; ret = uvc_video_clock_init(&stream->clock); if (ret < 0) return ret; /* Commit the streaming parameters. */ ret = uvc_commit_video(stream, &stream->ctrl); if (ret < 0) goto error_commit; ret = uvc_video_start_transfer(stream, GFP_KERNEL); if (ret < 0) goto error_video; return 0; error_video: usb_set_interface(stream->dev->udev, stream->intfnum, 0); error_commit: uvc_video_clock_cleanup(&stream->clock); return ret; } void uvc_video_stop_streaming(struct uvc_streaming *stream) { uvc_video_stop_transfer(stream, 1); if (stream->intf->num_altsetting > 1) { usb_set_interface(stream->dev->udev, stream->intfnum, 0); } else { /* * UVC doesn't specify how to inform a bulk-based device * when the video stream is stopped. Windows sends a * CLEAR_FEATURE(HALT) request to the video streaming * bulk endpoint, mimic the same behaviour. */ unsigned int epnum = stream->header.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; unsigned int dir = stream->header.bEndpointAddress & USB_ENDPOINT_DIR_MASK; unsigned int pipe; pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir; usb_clear_halt(stream->dev->udev, pipe); } uvc_video_clock_cleanup(&stream->clock); } |
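/*
 * Illustrative, self-contained user-space sketch (not part of uvc_video.c):
 * it parses a UVC payload header the same way uvc_video_decode_start() and
 * uvc_video_decode_meta() above interpret it. The UVC_STREAM_* masks mirror
 * the driver's definitions; the sample values in main() are made up, and the
 * memcpy-based field decoding assumes a little-endian host for brevity.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UVC_STREAM_FID	0x01
#define UVC_STREAM_EOF	0x02
#define UVC_STREAM_PTS	0x04
#define UVC_STREAM_SCR	0x08
#define UVC_STREAM_ERR	0x40
#define UVC_STREAM_EOH	0x80

static int parse_uvc_payload_header(const uint8_t *data, int len)
{
	int off = 2;

	/* Same sanity checks as uvc_video_decode_start(). */
	if (len < 2 || data[0] < 2 || data[0] > len)
		return -1;

	printf("header %d bytes, FID %d, EOF %d, ERR %d\n", data[0],
	       !!(data[1] & UVC_STREAM_FID), !!(data[1] & UVC_STREAM_EOF),
	       !!(data[1] & UVC_STREAM_ERR));

	if (data[1] & UVC_STREAM_PTS) {
		uint32_t pts;

		memcpy(&pts, &data[off], 4);
		printf("PTS %" PRIu32 "\n", pts);
		off += 4;
	}

	if (data[1] & UVC_STREAM_SCR) {
		uint32_t stc;
		uint16_t sof;

		memcpy(&stc, &data[off], 4);
		memcpy(&sof, &data[off + 4], 2);
		/* Only the low 11 bits of the SOF counter are meaningful. */
		printf("SCR STC %" PRIu32 ", SOF %d\n", stc, sof & 0x7ff);
	}

	return data[0];
}

int main(void)
{
	/* 12-byte header: EOH + EOF set, PTS and SCR present. */
	const uint8_t hdr[12] = {
		12,
		UVC_STREAM_EOH | UVC_STREAM_EOF | UVC_STREAM_PTS | UVC_STREAM_SCR,
		0x10, 0x27, 0x00, 0x00,		/* PTS = 10000 */
		0x20, 0x4e, 0x00, 0x00,		/* SCR STC = 20000 */
		0x34, 0x02,			/* SCR SOF = 564 */
	};

	return parse_uvc_payload_header(hdr, sizeof(hdr)) < 0;
}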
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * STK1160 driver
 *
 * Copyright (C) 2012 Ezequiel Garcia
 * <elezegarcia--a.t--gmail.com>
 *
 * Based on Easycap driver by R.M. Thomas
 *	Copyright (C) 2010 R.M. Thomas
 *	<rmthomas--a.t--sciolus.org>
 *
 * TODO:
 *
 * 1. Support stream at lower speed: lower frame rate or lower frame size.
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <media/i2c/saa7115.h> #include "stk1160.h" #include "stk1160-reg.h" static unsigned int input; module_param(input, int, 0644); MODULE_PARM_DESC(input, "Set default input"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ezequiel Garcia"); MODULE_DESCRIPTION("STK1160 driver"); /* Devices supported by this driver */ static const struct usb_device_id stk1160_id_table[] = { { USB_DEVICE(0x05e1, 0x0408) }, { } }; MODULE_DEVICE_TABLE(usb, stk1160_id_table); /* saa7113 I2C address */ static unsigned short saa7113_addrs[] = { 0x4a >> 1, I2C_CLIENT_END }; /* * Read/Write stk registers */ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value) { int ret; int pipe = usb_rcvctrlpipe(dev->udev, 0); u8 *buf; *value = 0; buf = kmalloc(sizeof(u8), GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(dev->udev, pipe, 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, reg, buf, sizeof(u8), 1000); if (ret < 0) { stk1160_err("read failed on reg 0x%x (%d)\n", reg, ret); kfree(buf); return ret; } *value = *buf; kfree(buf); return 0; } int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value) { int ret; int pipe = usb_sndctrlpipe(dev->udev, 0); ret = usb_control_msg(dev->udev, pipe, 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, reg, NULL, 0, 1000); if (ret < 0) { stk1160_err("write failed on reg 0x%x (%d)\n", reg, ret); return ret; } return 0; } void stk1160_select_input(struct stk1160 *dev) { int route; static const u8 gctrl[] = { 0x98, 0x90, 0x88, 0x80, 0x98 }; if (dev->ctl_input == STK1160_SVIDEO_INPUT) route = SAA7115_SVIDEO3; else route = SAA7115_COMPOSITE0; if (dev->ctl_input < ARRAY_SIZE(gctrl)) { v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing, route, 0, 0); stk1160_write_reg(dev, STK1160_GCTRL, gctrl[dev->ctl_input]); } } /* TODO: We should break this into pieces */ static void stk1160_reg_reset(struct stk1160 *dev) { int i; static const struct regval ctl[] = { {STK1160_GCTRL+2, 0x0078}, {STK1160_RMCTL+1, 0x0000}, {STK1160_RMCTL+3, 0x0002}, {STK1160_PLLSO, 0x0010}, {STK1160_PLLSO+1, 0x0000}, {STK1160_PLLSO+2, 0x0014}, {STK1160_PLLSO+3, 0x000E}, {STK1160_PLLFD, 0x0046}, /* Timing generator setup */ {STK1160_TIGEN, 0x0012}, {STK1160_TICTL, 0x002D}, {STK1160_TICTL+1, 0x0001}, {STK1160_TICTL+2, 0x0000}, {STK1160_TICTL+3, 0x0000}, {STK1160_TIGEN, 0x0080}, {0xffff, 0xffff} }; for (i = 0; ctl[i].reg != 0xffff; i++) stk1160_write_reg(dev, ctl[i].reg, ctl[i].val); } static void stk1160_release(struct v4l2_device *v4l2_dev) { struct stk1160 *dev = container_of(v4l2_dev, struct stk1160, v4l2_dev); stk1160_dbg("releasing all resources\n"); stk1160_i2c_unregister(dev); v4l2_ctrl_handler_free(&dev->ctrl_handler); v4l2_device_unregister(&dev->v4l2_dev); mutex_destroy(&dev->v4l_lock); mutex_destroy(&dev->vb_queue_lock); kfree(dev->alt_max_pkt_size); kfree(dev); } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) /* * Scan usb interface and populate max_pkt_size array * with information on each alternate setting. * The array should be allocated by the caller. 
*/ static int stk1160_scan_usb(struct usb_interface *intf, struct usb_device *udev, unsigned int *max_pkt_size) { int i, e, sizedescr, size, ifnum; const struct usb_endpoint_descriptor *desc; bool has_video = false, has_audio = false; const char *speed; ifnum = intf->altsetting[0].desc.bInterfaceNumber; /* Get endpoints */ for (i = 0; i < intf->num_altsetting; i++) { for (e = 0; e < intf->altsetting[i].desc.bNumEndpoints; e++) { /* This isn't clear enough, at least to me */ desc = &intf->altsetting[i].endpoint[e].desc; sizedescr = le16_to_cpu(desc->wMaxPacketSize); size = sizedescr & 0x7ff; if (udev->speed == USB_SPEED_HIGH) size = size * hb_mult(sizedescr); if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) { switch (desc->bEndpointAddress) { case STK1160_EP_AUDIO: has_audio = true; break; case STK1160_EP_VIDEO: has_video = true; max_pkt_size[i] = size; break; } } } } /* Is this even possible? */ if (!(has_audio || has_video)) { dev_err(&udev->dev, "no audio or video endpoints found\n"); return -ENODEV; } switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } dev_info(&udev->dev, "New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n", udev->manufacturer ? udev->manufacturer : "", udev->product ? udev->product : "", speed, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), ifnum, intf->altsetting->desc.bInterfaceNumber); /* This should never happen, since we rejected audio interfaces */ if (has_audio) dev_warn(&udev->dev, "audio interface %d found.\n\ This is not implemented by this driver,\ you should use snd-usb-audio instead\n", ifnum); if (has_video) dev_info(&udev->dev, "video interface %d found\n", ifnum); /* * Make sure we have 480 Mbps of bandwidth, otherwise things like * video stream wouldn't likely work, since 12 Mbps is generally * not enough even for most streams. */ if (udev->speed != USB_SPEED_HIGH) dev_warn(&udev->dev, "must be connected to a high-speed USB 2.0 port\n\ You may not be able to stream video smoothly\n"); return 0; } static int stk1160_probe(struct usb_interface *interface, const struct usb_device_id *id) { int rc = 0; unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */ struct usb_device *udev; struct stk1160 *dev; udev = interface_to_usbdev(interface); /* * Since usb audio class is supported by snd-usb-audio, * we reject audio interface. */ if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) return -ENODEV; /* Alloc an array for all possible max_pkt_size */ alt_max_pkt_size = kmalloc_array(interface->num_altsetting, sizeof(alt_max_pkt_size[0]), GFP_KERNEL); if (alt_max_pkt_size == NULL) return -ENOMEM; /* * Scan usb possibilities and populate alt_max_pkt_size array. * Also, check if device speed is fast enough. 
*/ rc = stk1160_scan_usb(interface, udev, alt_max_pkt_size); if (rc < 0) { kfree(alt_max_pkt_size); return rc; } dev = kzalloc(sizeof(struct stk1160), GFP_KERNEL); if (dev == NULL) { kfree(alt_max_pkt_size); return -ENOMEM; } dev->alt_max_pkt_size = alt_max_pkt_size; dev->udev = udev; dev->num_alt = interface->num_altsetting; dev->ctl_input = input; /* We save struct device for debug purposes only */ dev->dev = &interface->dev; usb_set_intfdata(interface, dev); /* initialize videobuf2 stuff */ rc = stk1160_vb2_setup(dev); if (rc < 0) goto free_err; /* * There is no need to take any locks here in probe * because we register the device node as the *last* thing. */ spin_lock_init(&dev->buf_lock); mutex_init(&dev->v4l_lock); mutex_init(&dev->vb_queue_lock); rc = v4l2_ctrl_handler_init(&dev->ctrl_handler, 0); if (rc) { stk1160_err("v4l2_ctrl_handler_init failed (%d)\n", rc); goto free_err; } /* * We obtain a v4l2_dev but defer * registration of video device node as the last thing. * There is no need to set the name if we give a device struct */ dev->v4l2_dev.release = stk1160_release; dev->v4l2_dev.ctrl_handler = &dev->ctrl_handler; rc = v4l2_device_register(dev->dev, &dev->v4l2_dev); if (rc) { stk1160_err("v4l2_device_register failed (%d)\n", rc); goto free_ctrl; } rc = stk1160_i2c_register(dev); if (rc < 0) goto unreg_v4l2; /* * To the best of my knowledge stk1160 boards only have * saa7113, but it doesn't hurt to support them all. */ dev->sd_saa7115 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "saa7115_auto", 0, saa7113_addrs); /* i2c reset saa711x */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0); v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); /* reset stk1160 to default values */ stk1160_reg_reset(dev); /* select default input */ stk1160_select_input(dev); stk1160_ac97_setup(dev); rc = stk1160_video_register(dev); if (rc < 0) goto unreg_i2c; return 0; unreg_i2c: stk1160_i2c_unregister(dev); unreg_v4l2: v4l2_device_unregister(&dev->v4l2_dev); free_ctrl: v4l2_ctrl_handler_free(&dev->ctrl_handler); free_err: kfree(alt_max_pkt_size); kfree(dev); return rc; } static void stk1160_disconnect(struct usb_interface *interface) { struct stk1160 *dev; dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); /* * Wait until all current v4l2 operation are finished * then deallocate resources */ mutex_lock(&dev->vb_queue_lock); mutex_lock(&dev->v4l_lock); /* Here is the only place where isoc get released */ stk1160_uninit_isoc(dev); stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); video_unregister_device(&dev->vdev); v4l2_device_disconnect(&dev->v4l2_dev); /* This way current users can detect device is gone */ dev->udev = NULL; mutex_unlock(&dev->v4l_lock); mutex_unlock(&dev->vb_queue_lock); /* * This calls stk1160_release if it's the last reference. * Otherwise, release is postponed until there are no users left. */ v4l2_device_put(&dev->v4l2_dev); } static struct usb_driver stk1160_usb_driver = { .name = "stk1160", .id_table = stk1160_id_table, .probe = stk1160_probe, .disconnect = stk1160_disconnect, }; module_usb_driver(stk1160_usb_driver); |
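/*
 * Illustrative, self-contained user-space sketch (not part of the stk1160
 * driver): it decodes wMaxPacketSize the same way stk1160_scan_usb() above
 * does. Bits 10:0 hold the base packet size; on high-speed isochronous
 * endpoints bits 12:11 encode the number of additional transactions per
 * microframe (the hb_mult() macro above), so the usable payload per
 * microframe is base * (1 + mult). The sample value in main() is made up.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int isoc_bytes_per_uframe(uint16_t wMaxPacketSize, int high_speed)
{
	unsigned int base = wMaxPacketSize & 0x7ff;
	unsigned int mult = 1 + ((wMaxPacketSize >> 11) & 0x03);

	return high_speed ? base * mult : base;
}

int main(void)
{
	/* 0x1400: base 1024 bytes, 2 extra transactions -> 3072 bytes. */
	printf("%u\n", isoc_bytes_per_uframe(0x1400, 1));
	return 0;
}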
/* SPDX-License-Identifier: GPL-2.0-only */
/* Driver for Realtek RTS5139 USB card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
* * Author: * Roger Tseng <rogerable@realtek.com> */ #ifndef __RTSX_USB_H #define __RTSX_USB_H #include <linux/usb.h> #define DRV_NAME_RTSX_USB "rtsx_usb" #define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc" #define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms" /* related module names */ #define RTSX_USB_SD_CARD 0 #define RTSX_USB_MS_CARD 1 /* endpoint numbers */ #define EP_BULK_OUT 1 #define EP_BULK_IN 2 #define EP_INTR_IN 3 /* USB vendor requests */ #define RTSX_USB_REQ_REG_OP 0x00 #define RTSX_USB_REQ_POLL 0x02 /* miscellaneous parameters */ #define MIN_DIV_N 60 #define MAX_DIV_N 120 #define MAX_PHASE 15 #define RX_TUNING_CNT 3 #define QFN24 0 #define LQFP48 1 #define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg)) /* data structures */ struct rtsx_ucr { u16 vendor_id; u16 product_id; int package; u8 ic_version; bool is_rts5179; unsigned int cur_clk; u8 *cmd_buf; unsigned int cmd_idx; u8 *rsp_buf; struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; struct timer_list sg_timer; struct mutex dev_mutex; }; /* buffer size */ #define IOBUF_SIZE 1024 /* prototypes of exported functions */ extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status); extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr, u8 mask, u8 data); extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout); extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout); extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, void *buf, unsigned int len, int use_sg, unsigned int *act_len, int timeout); extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card); /* card status */ #define SD_CD 0x01 #define MS_CD 0x02 #define XD_CD 0x04 #define CD_MASK (SD_CD | MS_CD | XD_CD) #define SD_WP 0x08 /* reader command field offset & parameters */ #define READ_REG_CMD 0 #define WRITE_REG_CMD 1 #define CHECK_REG_CMD 2 #define PACKET_TYPE 4 #define CNT_H 5 #define CNT_L 6 #define STAGE_FLAG 7 #define CMD_OFFSET 8 #define SEQ_WRITE_DATA_OFFSET 12 #define BATCH_CMD 0 #define SEQ_READ 1 #define SEQ_WRITE 2 #define STAGE_R 0x01 #define STAGE_DI 0x02 #define STAGE_DO 0x04 #define STAGE_MS_STATUS 0x08 #define STAGE_XD_STATUS 0x10 #define MODE_C 0x00 #define MODE_CR (STAGE_R) #define MODE_CDIR (STAGE_R | STAGE_DI) #define MODE_CDOR (STAGE_R | STAGE_DO) #define EP0_OP_SHIFT 14 #define EP0_READ_REG_CMD 2 #define EP0_WRITE_REG_CMD 3 #define rtsx_usb_cmd_hdr_tag(ucr) \ do { \ ucr->cmd_buf[0] = 'R'; \ ucr->cmd_buf[1] = 'T'; \ ucr->cmd_buf[2] = 'C'; \ ucr->cmd_buf[3] = 'R'; \ } while (0) static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr) { rtsx_usb_cmd_hdr_tag(ucr); ucr->cmd_idx = 0; ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD; } /* internal register address */ #define FPDCTL 0xFC00 #define SSC_DIV_N_0 0xFC07 #define SSC_CTL1 0xFC09 #define SSC_CTL2 0xFC0A #define CFG_MODE 0xFC0E #define CFG_MODE_1 0xFC0F 
#define RCCTL 0xFC14 #define SOF_WDOG 0xFC28 #define SYS_DUMMY0 0xFC30 #define MS_BLKEND 0xFD30 #define MS_READ_START 0xFD31 #define MS_READ_COUNT 0xFD32 #define MS_WRITE_START 0xFD33 #define MS_WRITE_COUNT 0xFD34 #define MS_COMMAND 0xFD35 #define MS_OLD_BLOCK_0 0xFD36 #define MS_OLD_BLOCK_1 0xFD37 #define MS_NEW_BLOCK_0 0xFD38 #define MS_NEW_BLOCK_1 0xFD39 #define MS_LOG_BLOCK_0 0xFD3A #define MS_LOG_BLOCK_1 0xFD3B #define MS_BUS_WIDTH 0xFD3C #define MS_PAGE_START 0xFD3D #define MS_PAGE_LENGTH 0xFD3E #define MS_CFG 0xFD40 #define MS_TPC 0xFD41 #define MS_TRANS_CFG 0xFD42 #define MS_TRANSFER 0xFD43 #define MS_INT_REG 0xFD44 #define MS_BYTE_CNT 0xFD45 #define MS_SECTOR_CNT_L 0xFD46 #define MS_SECTOR_CNT_H 0xFD47 #define MS_DBUS_H 0xFD48 #define CARD_DMA1_CTL 0xFD5C #define CARD_PULL_CTL1 0xFD60 #define CARD_PULL_CTL2 0xFD61 #define CARD_PULL_CTL3 0xFD62 #define CARD_PULL_CTL4 0xFD63 #define CARD_PULL_CTL5 0xFD64 #define CARD_PULL_CTL6 0xFD65 #define CARD_EXIST 0xFD6F #define CARD_INT_PEND 0xFD71 #define LDO_POWER_CFG 0xFD7B #define SD_CFG1 0xFDA0 #define SD_CFG2 0xFDA1 #define SD_CFG3 0xFDA2 #define SD_STAT1 0xFDA3 #define SD_STAT2 0xFDA4 #define SD_BUS_STAT 0xFDA5 #define SD_PAD_CTL 0xFDA6 #define SD_SAMPLE_POINT_CTL 0xFDA7 #define SD_PUSH_POINT_CTL 0xFDA8 #define SD_CMD0 0xFDA9 #define SD_CMD1 0xFDAA #define SD_CMD2 0xFDAB #define SD_CMD3 0xFDAC #define SD_CMD4 0xFDAD #define SD_CMD5 0xFDAE #define SD_BYTE_CNT_L 0xFDAF #define SD_BYTE_CNT_H 0xFDB0 #define SD_BLOCK_CNT_L 0xFDB1 #define SD_BLOCK_CNT_H 0xFDB2 #define SD_TRANSFER 0xFDB3 #define SD_CMD_STATE 0xFDB5 #define SD_DATA_STATE 0xFDB6 #define SD_VPCLK0_CTL 0xFC2A #define SD_VPCLK1_CTL 0xFC2B #define SD_DCMPS0_CTL 0xFC2C #define SD_DCMPS1_CTL 0xFC2D #define CARD_DMA1_CTL 0xFD5C #define HW_VERSION 0xFC01 #define SSC_CLK_FPGA_SEL 0xFC02 #define CLK_DIV 0xFC03 #define SFSM_ED 0xFC04 #define CD_DEGLITCH_WIDTH 0xFC20 #define CD_DEGLITCH_EN 0xFC21 #define AUTO_DELINK_EN 0xFC23 #define FPGA_PULL_CTL 0xFC1D #define CARD_CLK_SOURCE 0xFC2E #define CARD_SHARE_MODE 0xFD51 #define CARD_DRIVE_SEL 0xFD52 #define CARD_STOP 0xFD53 #define CARD_OE 0xFD54 #define CARD_AUTO_BLINK 0xFD55 #define CARD_GPIO 0xFD56 #define SD30_DRIVE_SEL 0xFD57 #define CARD_DATA_SOURCE 0xFD5D #define CARD_SELECT 0xFD5E #define CARD_CLK_EN 0xFD79 #define CARD_PWR_CTL 0xFD7A #define OCPCTL 0xFD80 #define OCPPARA1 0xFD81 #define OCPPARA2 0xFD82 #define OCPSTAT 0xFD83 #define HS_USB_STAT 0xFE01 #define HS_VCONTROL 0xFE26 #define HS_VSTAIN 0xFE27 #define HS_VLOADM 0xFE28 #define HS_VSTAOUT 0xFE29 #define MC_IRQ 0xFF00 #define MC_IRQEN 0xFF01 #define MC_FIFO_CTL 0xFF02 #define MC_FIFO_BC0 0xFF03 #define MC_FIFO_BC1 0xFF04 #define MC_FIFO_STAT 0xFF05 #define MC_FIFO_MODE 0xFF06 #define MC_FIFO_RD_PTR0 0xFF07 #define MC_FIFO_RD_PTR1 0xFF08 #define MC_DMA_CTL 0xFF10 #define MC_DMA_TC0 0xFF11 #define MC_DMA_TC1 0xFF12 #define MC_DMA_TC2 0xFF13 #define MC_DMA_TC3 0xFF14 #define MC_DMA_RST 0xFF15 #define RBUF_SIZE_MASK 0xFBFF #define RBUF_BASE 0xF000 #define PPBUF_BASE1 0xF800 #define PPBUF_BASE2 0xFA00 /* internal register value macros */ #define POWER_OFF 0x03 #define PARTIAL_POWER_ON 0x02 #define POWER_ON 0x00 #define POWER_MASK 0x03 #define LDO3318_PWR_MASK 0x0C #define LDO_ON 0x00 #define LDO_SUSPEND 0x08 #define LDO_OFF 0x0C #define DV3318_AUTO_PWR_OFF 0x10 #define FORCE_LDO_POWERB 0x60 /* LDO_POWER_CFG */ #define TUNE_SD18_MASK 0x1C #define TUNE_SD18_1V7 0x00 #define TUNE_SD18_1V8 (0x01 << 2) #define TUNE_SD18_1V9 (0x02 << 2) #define TUNE_SD18_2V0 (0x03 << 2) #define TUNE_SD18_2V7 
(0x04 << 2) #define TUNE_SD18_2V8 (0x05 << 2) #define TUNE_SD18_2V9 (0x06 << 2) #define TUNE_SD18_3V3 (0x07 << 2) /* CLK_DIV */ #define CLK_CHANGE 0x80 #define CLK_DIV_1 0x00 #define CLK_DIV_2 0x01 #define CLK_DIV_4 0x02 #define CLK_DIV_8 0x03 #define SSC_POWER_MASK 0x01 #define SSC_POWER_DOWN 0x01 #define SSC_POWER_ON 0x00 #define FPGA_VER 0x80 #define HW_VER_MASK 0x0F #define EXTEND_DMA1_ASYNC_SIGNAL 0x02 /* CFG_MODE*/ #define XTAL_FREE 0x80 #define CLK_MODE_MASK 0x03 #define CLK_MODE_12M_XTAL 0x00 #define CLK_MODE_NON_XTAL 0x01 #define CLK_MODE_24M_OSC 0x02 #define CLK_MODE_48M_OSC 0x03 /* CFG_MODE_1*/ #define RTS5179 0x02 #define NYET_EN 0x01 #define NYET_MSAK 0x01 #define SD30_DRIVE_MASK 0x07 #define SD20_DRIVE_MASK 0x03 #define DISABLE_SD_CD 0x08 #define DISABLE_MS_CD 0x10 #define DISABLE_XD_CD 0x20 #define SD_CD_DEGLITCH_EN 0x01 #define MS_CD_DEGLITCH_EN 0x02 #define XD_CD_DEGLITCH_EN 0x04 #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SD30_DRIVE_SEL */ #define DRIVER_TYPE_A 0x05 #define DRIVER_TYPE_B 0x03 #define DRIVER_TYPE_C 0x02 #define DRIVER_TYPE_D 0x01 /* SD_BUS_STAT */ #define SD_CLK_TOGGLE_EN 0x80 #define SD_CLK_FORCE_STOP 0x40 #define SD_DAT3_STATUS 0x10 #define SD_DAT2_STATUS 0x08 #define SD_DAT1_STATUS 0x04 #define SD_DAT0_STATUS 0x02 #define SD_CMD_STATUS 0x01 /* SD_PAD_CTL */ #define SD_IO_USING_1V8 0x80 #define SD_IO_USING_3V3 0x7F #define TYPE_A_DRIVING 0x00 #define TYPE_B_DRIVING 0x01 #define TYPE_C_DRIVING 0x02 #define TYPE_D_DRIVING 0x03 /* CARD_CLK_EN */ #define SD_CLK_EN 0x04 #define MS_CLK_EN 0x08 /* CARD_SELECT */ #define SD_MOD_SEL 2 #define MS_MOD_SEL 3 /* CARD_SHARE_MODE */ #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SSC_CTL1 */ #define SSC_RSTB 0x80 #define SSC_8X_EN 0x40 #define SSC_FIX_FRAC 0x20 #define SSC_SEL_1M 0x00 #define SSC_SEL_2M 0x08 #define SSC_SEL_4M 0x10 #define SSC_SEL_8M 0x18 /* SSC_CTL2 */ #define SSC_DEPTH_MASK 0x03 #define SSC_DEPTH_DISALBE 0x00 #define SSC_DEPTH_2M 0x01 #define SSC_DEPTH_1M 0x02 #define SSC_DEPTH_512K 0x03 /* SD_VPCLK0_CTL */ #define PHASE_CHANGE 0x80 #define PHASE_NOT_RESET 0x40 /* SD_TRANSFER */ #define SD_TRANSFER_START 0x80 #define SD_TRANSFER_END 0x40 #define SD_STAT_IDLE 0x20 #define SD_TRANSFER_ERR 0x10 #define SD_TM_NORMAL_WRITE 0x00 #define SD_TM_AUTO_WRITE_3 0x01 #define SD_TM_AUTO_WRITE_4 0x02 #define SD_TM_AUTO_READ_3 0x05 #define SD_TM_AUTO_READ_4 0x06 #define SD_TM_CMD_RSP 0x08 #define SD_TM_AUTO_WRITE_1 0x09 #define SD_TM_AUTO_WRITE_2 0x0A #define SD_TM_NORMAL_READ 0x0C #define SD_TM_AUTO_READ_1 0x0D #define SD_TM_AUTO_READ_2 0x0E #define SD_TM_AUTO_TUNING 0x0F /* SD_CFG1 */ #define SD_CLK_DIVIDE_0 0x00 #define SD_CLK_DIVIDE_256 0xC0 #define SD_CLK_DIVIDE_128 0x80 #define SD_CLK_DIVIDE_MASK 0xC0 #define SD_BUS_WIDTH_1BIT 0x00 #define SD_BUS_WIDTH_4BIT 0x01 #define SD_BUS_WIDTH_8BIT 0x02 #define SD_ASYNC_FIFO_RST 0x10 #define SD_20_MODE 0x00 #define SD_DDR_MODE 0x04 #define SD_30_MODE 0x08 /* SD_CFG2 */ #define SD_CALCULATE_CRC7 0x00 #define SD_NO_CALCULATE_CRC7 0x80 #define SD_CHECK_CRC16 0x00 #define SD_NO_CHECK_CRC16 0x40 #define SD_WAIT_CRC_TO_EN 0x20 #define SD_WAIT_BUSY_END 0x08 #define SD_NO_WAIT_BUSY_END 0x00 #define SD_CHECK_CRC7 0x00 #define SD_NO_CHECK_CRC7 0x04 #define 
SD_RSP_LEN_0 0x00 #define SD_RSP_LEN_6 0x01 #define SD_RSP_LEN_17 0x02 #define SD_RSP_TYPE_R0 0x04 #define SD_RSP_TYPE_R1 0x01 #define SD_RSP_TYPE_R1b 0x09 #define SD_RSP_TYPE_R2 0x02 #define SD_RSP_TYPE_R3 0x05 #define SD_RSP_TYPE_R4 0x05 #define SD_RSP_TYPE_R5 0x01 #define SD_RSP_TYPE_R6 0x01 #define SD_RSP_TYPE_R7 0x01 /* SD_STAT1 */ #define SD_CRC7_ERR 0x80 #define SD_CRC16_ERR 0x40 #define SD_CRC_WRITE_ERR 0x20 #define SD_CRC_WRITE_ERR_MASK 0x1C #define GET_CRC_TIME_OUT 0x02 #define SD_TUNING_COMPARE_ERR 0x01 /* SD_DATA_STATE */ #define SD_DATA_IDLE 0x80 /* CARD_DATA_SOURCE */ #define PINGPONG_BUFFER 0x01 #define RING_BUFFER 0x00 /* CARD_OE */ #define SD_OUTPUT_EN 0x04 #define MS_OUTPUT_EN 0x08 /* CARD_STOP */ #define SD_STOP 0x04 #define MS_STOP 0x08 #define SD_CLR_ERR 0x40 #define MS_CLR_ERR 0x80 /* CARD_CLK_SOURCE */ #define CRC_FIX_CLK (0x00 << 0) #define CRC_VAR_CLK0 (0x01 << 0) #define CRC_VAR_CLK1 (0x02 << 0) #define SD30_FIX_CLK (0x00 << 2) #define SD30_VAR_CLK0 (0x01 << 2) #define SD30_VAR_CLK1 (0x02 << 2) #define SAMPLE_FIX_CLK (0x00 << 4) #define SAMPLE_VAR_CLK0 (0x01 << 4) #define SAMPLE_VAR_CLK1 (0x02 << 4) /* SD_SAMPLE_POINT_CTL */ #define DDR_FIX_RX_DAT 0x00 #define DDR_VAR_RX_DAT 0x80 #define DDR_FIX_RX_DAT_EDGE 0x00 #define DDR_FIX_RX_DAT_14_DELAY 0x40 #define DDR_FIX_RX_CMD 0x00 #define DDR_VAR_RX_CMD 0x20 #define DDR_FIX_RX_CMD_POS_EDGE 0x00 #define DDR_FIX_RX_CMD_14_DELAY 0x10 #define SD20_RX_POS_EDGE 0x00 #define SD20_RX_14_DELAY 0x08 #define SD20_RX_SEL_MASK 0x08 /* SD_PUSH_POINT_CTL */ #define DDR_FIX_TX_CMD_DAT 0x00 #define DDR_VAR_TX_CMD_DAT 0x80 #define DDR_FIX_TX_DAT_14_TSU 0x00 #define DDR_FIX_TX_DAT_12_TSU 0x40 #define DDR_FIX_TX_CMD_NEG_EDGE 0x00 #define DDR_FIX_TX_CMD_14_AHEAD 0x20 #define SD20_TX_NEG_EDGE 0x00 #define SD20_TX_14_AHEAD 0x10 #define SD20_TX_SEL_MASK 0x10 #define DDR_VAR_SDCLK_POL_SWAP 0x01 /* MS_CFG */ #define SAMPLE_TIME_RISING 0x00 #define SAMPLE_TIME_FALLING 0x80 #define PUSH_TIME_DEFAULT 0x00 #define PUSH_TIME_ODD 0x40 #define NO_EXTEND_TOGGLE 0x00 #define EXTEND_TOGGLE_CHK 0x20 #define MS_BUS_WIDTH_1 0x00 #define MS_BUS_WIDTH_4 0x10 #define MS_BUS_WIDTH_8 0x18 #define MS_2K_SECTOR_MODE 0x04 #define MS_512_SECTOR_MODE 0x00 #define MS_TOGGLE_TIMEOUT_EN 0x00 #define MS_TOGGLE_TIMEOUT_DISEN 0x01 #define MS_NO_CHECK_INT 0x02 /* MS_TRANS_CFG */ #define WAIT_INT 0x80 #define NO_WAIT_INT 0x00 #define NO_AUTO_READ_INT_REG 0x00 #define AUTO_READ_INT_REG 0x40 #define MS_CRC16_ERR 0x20 #define MS_RDY_TIMEOUT 0x10 #define MS_INT_CMDNK 0x08 #define MS_INT_BREQ 0x04 #define MS_INT_ERR 0x02 #define MS_INT_CED 0x01 /* MS_TRANSFER */ #define MS_TRANSFER_START 0x80 #define MS_TRANSFER_END 0x40 #define MS_TRANSFER_ERR 0x20 #define MS_BS_STATE 0x10 #define MS_TM_READ_BYTES 0x00 #define MS_TM_NORMAL_READ 0x01 #define MS_TM_WRITE_BYTES 0x04 #define MS_TM_NORMAL_WRITE 0x05 #define MS_TM_AUTO_READ 0x08 #define MS_TM_AUTO_WRITE 0x0C #define MS_TM_SET_CMD 0x06 #define MS_TM_COPY_PAGE 0x07 #define MS_TM_MULTI_READ 0x02 #define MS_TM_MULTI_WRITE 0x03 /* MC_FIFO_CTL */ #define FIFO_FLUSH 0x01 /* MC_DMA_RST */ #define DMA_RESET 0x01 /* MC_DMA_CTL */ #define DMA_TC_EQ_0 0x80 #define DMA_DIR_TO_CARD 0x00 #define DMA_DIR_FROM_CARD 0x02 #define DMA_EN 0x01 #define DMA_128 (0 << 2) #define DMA_256 (1 << 2) #define DMA_512 (2 << 2) #define DMA_1024 (3 << 2) #define DMA_PACK_SIZE_MASK 0x0C /* CARD_INT_PEND */ #define XD_INT 0x10 #define MS_INT 0x08 #define SD_INT 0x04 /* LED operations*/ static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr) { return 
rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02); } static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr) { return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03); } /* HW error clearing */ static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8); } static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH); rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET); } #endif /* __RTS51139_H */
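/*
 * Editor's sketch, not part of the original header: the ep0 accessor used by
 * the inline helpers above takes a (register, mask, data) triple, so a caller
 * can flip a few bits without disturbing the rest of the register.  The
 * function below is an invented example of that calling convention, reusing
 * only names defined in this header; it is not a verified hardware sequence
 * from the real rtsx_usb driver.
 */
static inline int example_sd_stop_and_clear_error(struct rtsx_ucr *ucr)
{
	int err;

	/* Halt the SD state machine: only the SD_STOP bit may change. */
	err = rtsx_usb_ep0_write_register(ucr, CARD_STOP, SD_STOP, SD_STOP);
	if (err)
		return err;

	/* Clear a latched SD error by pulsing SD_CLR_ERR in the same register. */
	return rtsx_usb_ep0_write_register(ucr, CARD_STOP, SD_CLR_ERR, SD_CLR_ERR);
}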
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Integer base 2 logarithm calculation * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_LOG2_H #define _LINUX_LOG2_H #include <linux/types.h> #include <linux/bitops.h> /* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() * - the arch is not required to handle n==0 if implementing the fallback */ #ifndef CONFIG_ARCH_HAS_ILOG2_U32 static __always_inline __attribute__((const)) int __ilog2_u32(u32 n) { return fls(n) - 1; } #endif #ifndef CONFIG_ARCH_HAS_ILOG2_U64 static __always_inline __attribute__((const)) int __ilog2_u64(u64 n) { return fls64(n) - 1; } #endif /** * is_power_of_2() - check if a value is a power of two * @n: the value to check * * Determine whether some value is a power of two, where zero is * *not* considered a power of two. * Return: true if @n is a power of 2, otherwise false. */ static __always_inline __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); } /** * __roundup_pow_of_two() - round up to nearest power of two * @n: value to round up */ static inline __attribute__((const)) unsigned long __roundup_pow_of_two(unsigned long n) { return 1UL << fls_long(n - 1); } /** * __rounddown_pow_of_two() - round down to nearest power of two * @n: value to round down */ static inline __attribute__((const)) unsigned long __rounddown_pow_of_two(unsigned long n) { return 1UL << (fls_long(n) - 1); } /** * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value * @n: parameter * * Use this where sparse expects a true constant expression, e.g. for array * indices. */ #define const_ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ (n) & (1ULL << 60) ? 60 : \ (n) & (1ULL << 59) ? 59 : \ (n) & (1ULL << 58) ? 58 : \ (n) & (1ULL << 57) ? 57 : \ (n) & (1ULL << 56) ? 56 : \ (n) & (1ULL << 55) ? 55 : \ (n) & (1ULL << 54) ? 54 : \ (n) & (1ULL << 53) ? 53 : \ (n) & (1ULL << 52) ? 52 : \ (n) & (1ULL << 51) ? 51 : \ (n) & (1ULL << 50) ? 50 : \ (n) & (1ULL << 49) ? 49 : \ (n) & (1ULL << 48) ? 48 : \ (n) & (1ULL << 47) ? 47 : \ (n) & (1ULL << 46) ? 46 : \ (n) & (1ULL << 45) ? 45 : \ (n) & (1ULL << 44) ? 44 : \ (n) & (1ULL << 43) ? 43 : \ (n) & (1ULL << 42) ? 42 : \ (n) & (1ULL << 41) ? 41 : \ (n) & (1ULL << 40) ? 40 : \ (n) & (1ULL << 39) ? 39 : \ (n) & (1ULL << 38) ?
38 : \ (n) & (1ULL << 37) ? 37 : \ (n) & (1ULL << 36) ? 36 : \ (n) & (1ULL << 35) ? 35 : \ (n) & (1ULL << 34) ? 34 : \ (n) & (1ULL << 33) ? 33 : \ (n) & (1ULL << 32) ? 32 : \ (n) & (1ULL << 31) ? 31 : \ (n) & (1ULL << 30) ? 30 : \ (n) & (1ULL << 29) ? 29 : \ (n) & (1ULL << 28) ? 28 : \ (n) & (1ULL << 27) ? 27 : \ (n) & (1ULL << 26) ? 26 : \ (n) & (1ULL << 25) ? 25 : \ (n) & (1ULL << 24) ? 24 : \ (n) & (1ULL << 23) ? 23 : \ (n) & (1ULL << 22) ? 22 : \ (n) & (1ULL << 21) ? 21 : \ (n) & (1ULL << 20) ? 20 : \ (n) & (1ULL << 19) ? 19 : \ (n) & (1ULL << 18) ? 18 : \ (n) & (1ULL << 17) ? 17 : \ (n) & (1ULL << 16) ? 16 : \ (n) & (1ULL << 15) ? 15 : \ (n) & (1ULL << 14) ? 14 : \ (n) & (1ULL << 13) ? 13 : \ (n) & (1ULL << 12) ? 12 : \ (n) & (1ULL << 11) ? 11 : \ (n) & (1ULL << 10) ? 10 : \ (n) & (1ULL << 9) ? 9 : \ (n) & (1ULL << 8) ? 8 : \ (n) & (1ULL << 7) ? 7 : \ (n) & (1ULL << 6) ? 6 : \ (n) & (1ULL << 5) ? 5 : \ (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ 1) : \ -1) /** * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value * @n: parameter * * constant-capable log of base 2 calculation * - this can be used to initialise global variables from constant data, hence * the massive ternary operator construction * * selects the appropriately-sized optimised version depending on sizeof(n) */ #define ilog2(n) \ ( \ __builtin_constant_p(n) ? \ ((n) < 2 ? 0 : \ 63 - __builtin_clzll(n)) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ ) /** * roundup_pow_of_two - round the given value up to nearest power of two * @n: parameter * * round the given value up to the nearest power of two * - the result is undefined when n == 0 * - this can be used to initialise global variables from constant data */ #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ ) /** * rounddown_pow_of_two - round the given value down to nearest power of two * @n: parameter * * round the given value down to the nearest power of two * - the result is undefined when n == 0 * - this can be used to initialise global variables from constant data */ #define rounddown_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ (1UL << ilog2(n))) : \ __rounddown_pow_of_two(n) \ ) static inline __attribute_const__ int __order_base_2(unsigned long n) { return n > 1 ? ilog2(n - 1) + 1 : 0; } /** * order_base_2 - calculate the (rounded up) base 2 order of the argument * @n: parameter * * The first few values calculated by this routine: * ob2(0) = 0 * ob2(1) = 0 * ob2(2) = 1 * ob2(3) = 2 * ob2(4) = 2 * ob2(5) = 3 * ... and so on. */ #define order_base_2(n) \ ( \ __builtin_constant_p(n) ? ( \ ((n) == 0 || (n) == 1) ? 0 : \ ilog2((n) - 1) + 1) : \ __order_base_2(n) \ ) static inline __attribute__((const)) int __bits_per(unsigned long n) { if (n < 2) return 1; if (is_power_of_2(n)) return order_base_2(n) + 1; return order_base_2(n); } /** * bits_per - calculate the number of bits required for the argument * @n: parameter * * This is constant-capable and can be used for compile time * initializations, e.g bitfields. * * The first few values calculated by this routine: * bf(0) = 1 * bf(1) = 1 * bf(2) = 2 * bf(3) = 2 * bf(4) = 3 * ... and so on. */ #define bits_per(n) \ ( \ __builtin_constant_p(n) ? ( \ ((n) == 0 || (n) == 1) \ ? 1 : ilog2(n) + 1 \ ) : \ __bits_per(n) \ ) #endif /* _LINUX_LOG2_H */ |
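/*
 * Editor's sketch, not part of the original header: the macros above are
 * constant-capable, so they can size arrays and initialise globals at
 * compile time, while non-constant arguments fall back to the fls()-based
 * helpers.  RING_SIZE, RING_SLOTS, ring_order and example_pick_order() are
 * invented for illustration only.
 */
#define RING_SIZE	1000				/* requested entries        */
#define RING_SLOTS	roundup_pow_of_two(RING_SIZE)	/* -> 1024, compile time    */

static const int ring_order = order_base_2(RING_SIZE);	/* -> 10, compile time      */

static int example_pick_order(unsigned long nbytes)
{
	/* Runtime path: the macros expand to the __ilog2_*()/fls_long() helpers. */
	if (nbytes < 2)
		return 0;	/* roundup_pow_of_two(0) is undefined, see above */
	if (!is_power_of_2(nbytes))
		nbytes = roundup_pow_of_two(nbytes);
	return ilog2(nbytes);
}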
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211 glue code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c)
2007-2009, Christian Lamparter <chunkeey@web.de> * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/module.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_DESCRIPTION("Softmac Prism54 common code"); MODULE_LICENSE("GPL"); MODULE_ALIAS("prism54common"); static int p54_sta_add_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct p54_common *priv = hw->priv; /* * Notify the firmware that we don't want or we don't * need to buffer frames for this station anymore. */ p54_sta_unlock(priv, sta->addr); return 0; } static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta) { struct p54_common *priv = dev->priv; switch (notify_cmd) { case STA_NOTIFY_AWAKE: /* update the firmware's filter table */ p54_sta_unlock(priv, sta->addr); break; default: break; } } static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, bool set) { struct p54_common *priv = dev->priv; return p54_update_beacon_tim(priv, sta->aid, set); } u8 *p54_find_ie(struct sk_buff *skb, u8 ie) { struct ieee80211_mgmt *mgmt = (void *)skb->data; u8 *pos, *end; if (skb->len <= sizeof(mgmt)) return NULL; pos = (u8 *)mgmt->u.beacon.variable; end = skb->data + skb->len; while (pos < end) { if (pos + 2 + pos[1] > end) return NULL; if (pos[0] == ie) return pos; pos += 2 + pos[1]; } return NULL; } static int p54_beacon_format_ie_tim(struct sk_buff *skb) { /* * the good excuse for this mess is ... the firmware. * The dummy TIM MUST be at the end of the beacon frame, * because it'll be overwritten! */ u8 *tim; u8 dtim_len; u8 dtim_period; u8 *next; tim = p54_find_ie(skb, WLAN_EID_TIM); if (!tim) return 0; dtim_len = tim[1]; dtim_period = tim[3]; next = tim + 2 + dtim_len; if (dtim_len < 3) return -EINVAL; memmove(tim, next, skb_tail_pointer(skb) - next); tim = skb_tail_pointer(skb) - (dtim_len + 2); /* add the dummy at the end */ tim[0] = WLAN_EID_TIM; tim[1] = 3; tim[2] = 0; tim[3] = dtim_period; tim[4] = 0; if (dtim_len > 3) skb_trim(skb, skb->len - (dtim_len - 3)); return 0; } static int p54_beacon_update(struct p54_common *priv, struct ieee80211_vif *vif) { struct ieee80211_tx_control control = { }; struct sk_buff *beacon; int ret; beacon = ieee80211_beacon_get(priv->hw, vif, 0); if (!beacon) return -ENOMEM; ret = p54_beacon_format_ie_tim(beacon); if (ret) return ret; /* * During operation, the firmware takes care of beaconing. * The driver only needs to upload a new beacon template, once * the template was changed by the stack or userspace. * * LMAC API 3.2.2 also specifies that the driver does not need * to cancel the old beacon template by hand, instead the firmware * will release the previous one through the feedback mechanism. 
*/ p54_tx_80211(priv->hw, &control, beacon); priv->tsf_high32 = 0; priv->tsf_low32 = 0; return 0; } static int p54_start(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; int err; mutex_lock(&priv->conf_mutex); err = priv->open(dev); if (err) goto out; P54_SET_QUEUE(priv->qos_params[0], 0x0002, 0x0003, 0x0007, 47); P54_SET_QUEUE(priv->qos_params[1], 0x0002, 0x0007, 0x000f, 94); P54_SET_QUEUE(priv->qos_params[2], 0x0003, 0x000f, 0x03ff, 0); P54_SET_QUEUE(priv->qos_params[3], 0x0007, 0x000f, 0x03ff, 0); err = p54_set_edcf(priv); if (err) goto out; eth_broadcast_addr(priv->bssid); priv->mode = NL80211_IFTYPE_MONITOR; err = p54_setup_mac(priv); if (err) { priv->mode = NL80211_IFTYPE_UNSPECIFIED; goto out; } ieee80211_queue_delayed_work(dev, &priv->work, 0); priv->softled_state = 0; err = p54_set_leds(priv); out: mutex_unlock(&priv->conf_mutex); return err; } static void p54_stop(struct ieee80211_hw *dev, bool suspend) { struct p54_common *priv = dev->priv; int i; priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->softled_state = 0; cancel_delayed_work_sync(&priv->work); mutex_lock(&priv->conf_mutex); p54_set_leds(priv); priv->stop(dev); skb_queue_purge(&priv->tx_pending); skb_queue_purge(&priv->tx_queue); for (i = 0; i < P54_QUEUE_NUM; i++) { priv->tx_stats[i].count = 0; priv->tx_stats[i].len = 0; } priv->beacon_req_id = cpu_to_le32(0); priv->tsf_high32 = priv->tsf_low32 = 0; mutex_unlock(&priv->conf_mutex); } static int p54_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct p54_common *priv = dev->priv; int err; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; mutex_lock(&priv->conf_mutex); if (priv->mode != NL80211_IFTYPE_MONITOR) { mutex_unlock(&priv->conf_mutex); return -EOPNOTSUPP; } priv->vif = vif; switch (vif->type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: priv->mode = vif->type; break; default: mutex_unlock(&priv->conf_mutex); return -EOPNOTSUPP; } memcpy(priv->mac_addr, vif->addr, ETH_ALEN); err = p54_setup_mac(priv); mutex_unlock(&priv->conf_mutex); return err; } static void p54_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct p54_common *priv = dev->priv; mutex_lock(&priv->conf_mutex); priv->vif = NULL; /* * LMAC API 3.2.2 states that any active beacon template must be * canceled by the driver before attempting a mode transition. */ if (le32_to_cpu(priv->beacon_req_id) != 0) { p54_tx_cancel(priv, priv->beacon_req_id); wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); } priv->mode = NL80211_IFTYPE_MONITOR; eth_zero_addr(priv->mac_addr); eth_zero_addr(priv->bssid); p54_setup_mac(priv); mutex_unlock(&priv->conf_mutex); } static int p54_wait_for_stats(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; int ret; priv->update_stats = true; ret = p54_fetch_statistics(priv); if (ret) return ret; ret = wait_for_completion_interruptible_timeout(&priv->stat_comp, HZ); if (ret == 0) return -ETIMEDOUT; return 0; } static void p54_reset_stats(struct p54_common *priv) { struct ieee80211_channel *chan = priv->curchan; if (chan) { struct survey_info *info = &priv->survey[chan->hw_value]; /* only reset channel statistics, don't touch .filled, etc. 
*/ info->time = 0; info->time_busy = 0; info->time_tx = 0; } priv->update_stats = true; priv->survey_raw.active = 0; priv->survey_raw.cca = 0; priv->survey_raw.tx = 0; } static int p54_config(struct ieee80211_hw *dev, u32 changed) { int ret = 0; struct p54_common *priv = dev->priv; struct ieee80211_conf *conf = &dev->conf; mutex_lock(&priv->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_POWER) priv->output_power = conf->power_level << 2; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *oldchan; WARN_ON(p54_wait_for_stats(dev)); oldchan = priv->curchan; priv->curchan = NULL; ret = p54_scan(priv, P54_SCAN_EXIT, 0); if (ret) { priv->curchan = oldchan; goto out; } /* * TODO: Use the LM_SCAN_TRAP to determine the current * operating channel. */ priv->curchan = priv->hw->conf.chandef.chan; p54_reset_stats(priv); WARN_ON(p54_fetch_statistics(priv)); } if (changed & IEEE80211_CONF_CHANGE_PS) { WARN_ON(p54_wait_for_stats(dev)); ret = p54_set_ps(priv); if (ret) goto out; WARN_ON(p54_wait_for_stats(dev)); } if (changed & IEEE80211_CONF_CHANGE_IDLE) { WARN_ON(p54_wait_for_stats(dev)); ret = p54_setup_mac(priv); if (ret) goto out; WARN_ON(p54_wait_for_stats(dev)); } out: mutex_unlock(&priv->conf_mutex); return ret; } static u64 p54_prepare_multicast(struct ieee80211_hw *dev, struct netdev_hw_addr_list *mc_list) { struct p54_common *priv = dev->priv; struct netdev_hw_addr *ha; int i; BUILD_BUG_ON(ARRAY_SIZE(priv->mc_maclist) != ARRAY_SIZE(((struct p54_group_address_table *)NULL)->mac_list)); /* * The first entry is reserved for the global broadcast MAC. * Otherwise the firmware will drop it and ARP will no longer work. */ i = 1; priv->mc_maclist_num = netdev_hw_addr_list_count(mc_list) + i; netdev_hw_addr_list_for_each(ha, mc_list) { memcpy(&priv->mc_maclist[i], ha->addr, ETH_ALEN); i++; if (i >= ARRAY_SIZE(priv->mc_maclist)) break; } return 1; /* update */ } static void p54_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct p54_common *priv = dev->priv; *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS; priv->filter_flags = *total_flags; if (changed_flags & FIF_OTHER_BSS) p54_setup_mac(priv); if (changed_flags & FIF_ALLMULTI || multicast) p54_set_groupfilter(priv); } static int p54_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct p54_common *priv = dev->priv; int ret; mutex_lock(&priv->conf_mutex); P54_SET_QUEUE(priv->qos_params[queue], params->aifs, params->cw_min, params->cw_max, params->txop); ret = p54_set_edcf(priv); mutex_unlock(&priv->conf_mutex); return ret; } static void p54_work(struct work_struct *work) { struct p54_common *priv = container_of(work, struct p54_common, work.work); if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; /* * TODO: walk through tx_queue and do the following tasks * 1. initiate bursts. * 2. cancel stuck frames / reset the device if necessary. 
*/ mutex_lock(&priv->conf_mutex); WARN_ON_ONCE(p54_fetch_statistics(priv)); mutex_unlock(&priv->conf_mutex); } static int p54_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats) { struct p54_common *priv = dev->priv; memcpy(stats, &priv->stats, sizeof(*stats)); return 0; } static void p54_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct p54_common *priv = dev->priv; mutex_lock(&priv->conf_mutex); if (changed & BSS_CHANGED_BSSID) { memcpy(priv->bssid, info->bssid, ETH_ALEN); p54_setup_mac(priv); } if (changed & BSS_CHANGED_BEACON) { p54_scan(priv, P54_SCAN_EXIT, 0); p54_setup_mac(priv); p54_beacon_update(priv, vif); p54_set_edcf(priv); } if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BEACON)) { priv->use_short_slot = info->use_short_slot; p54_set_edcf(priv); } if (changed & BSS_CHANGED_BASIC_RATES) { if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ) priv->basic_rate_mask = (info->basic_rates << 4); else priv->basic_rate_mask = info->basic_rates; p54_setup_mac(priv); if (priv->fw_var >= 0x500) p54_scan(priv, P54_SCAN_EXIT, 0); } if (changed & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { priv->aid = vif->cfg.aid; priv->wakeup_timer = info->beacon_int * info->dtim_period * 5; p54_setup_mac(priv); } else { priv->wakeup_timer = 500; priv->aid = 0; } } mutex_unlock(&priv->conf_mutex); } static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct p54_common *priv = dev->priv; int slot, ret = 0; u8 algo = 0; u8 *addr = NULL; if (modparam_nohwcrypt) return -EOPNOTSUPP; if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { /* * Unfortunately most/all firmwares are trying to decrypt * incoming management frames if a suitable key can be found. * However, in doing so the data in these frames gets * corrupted. So, we can't have firmware supported crypto * offload in this case. */ return -EOPNOTSUPP; } mutex_lock(&priv->conf_mutex); if (cmd == SET_KEY) { switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL | BR_DESC_PRIV_CAP_TKIP))) { ret = -EOPNOTSUPP; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; algo = P54_CRYPTO_TKIPMICHAEL; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) { ret = -EOPNOTSUPP; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; algo = P54_CRYPTO_WEP; break; case WLAN_CIPHER_SUITE_CCMP: if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) { ret = -EOPNOTSUPP; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; algo = P54_CRYPTO_AESCCMP; break; default: ret = -EOPNOTSUPP; goto out_unlock; } slot = bitmap_find_free_region(priv->used_rxkeys, priv->rx_keycache_size, 0); if (slot < 0) { /* * The device supports the chosen algorithm, but the * firmware does not provide enough key slots to store * all of them. * But encryption offload for outgoing frames is always * possible, so we just pretend that the upload was * successful and do the decryption in software. */ /* mark the key as invalid. */ key->hw_key_idx = 0xff; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_RESERVE_TAILROOM; } else { slot = key->hw_key_idx; if (slot == 0xff) { /* This key was not uploaded into the rx key cache. 
*/ goto out_unlock; } bitmap_release_region(priv->used_rxkeys, slot, 0); algo = 0; } if (sta) addr = sta->addr; ret = p54_upload_key(priv, algo, slot, key->keyidx, key->keylen, addr, key->key); if (ret) { bitmap_release_region(priv->used_rxkeys, slot, 0); ret = -EOPNOTSUPP; goto out_unlock; } key->hw_key_idx = slot; out_unlock: mutex_unlock(&priv->conf_mutex); return ret; } static int p54_get_survey(struct ieee80211_hw *dev, int idx, struct survey_info *survey) { struct p54_common *priv = dev->priv; struct ieee80211_channel *chan; int err, tries; bool in_use = false; if (idx >= priv->chan_num) return -ENOENT; #define MAX_TRIES 1 for (tries = 0; tries < MAX_TRIES; tries++) { chan = priv->curchan; if (chan && chan->hw_value == idx) { mutex_lock(&priv->conf_mutex); err = p54_wait_for_stats(dev); mutex_unlock(&priv->conf_mutex); if (err) return err; in_use = true; } memcpy(survey, &priv->survey[idx], sizeof(*survey)); if (in_use) { /* test if the reported statistics are valid. */ if (survey->time != 0) { survey->filled |= SURVEY_INFO_IN_USE; } else { /* * hw/fw has not accumulated enough sample sets. * Wait for 100ms, this ought to be enough to * get at least one non-null set of channel * usage statistics. */ msleep(100); continue; } } return 0; } return -ETIMEDOUT; #undef MAX_TRIES } static unsigned int p54_flush_count(struct p54_common *priv) { unsigned int total = 0, i; BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats)); /* * Because the firmware has the sole control over any frames * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they * don't really count as pending or active. */ for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++) total += priv->tx_stats[i].len; return total; } static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif, u32 queues, bool drop) { struct p54_common *priv = dev->priv; unsigned int total, i; /* * Currently, it wouldn't really matter if we wait for one second * or 15 minutes. But once someone gets around and completes the * TODOs [ancel stuck frames / reset device] in p54_work, it will * suddenly make sense to wait that long. */ i = P54_STATISTICS_UPDATE * 2 / 20; /* * In this case no locking is required because as we speak the * queues have already been stopped and no new frames can sneak * up from behind. 
*/ while ((total = p54_flush_count(priv)) && i--) { /* waste time */ msleep(20); } WARN(total, "tx flush timeout, unresponsive firmware"); } static void p54_set_coverage_class(struct ieee80211_hw *dev, s16 coverage_class) { struct p54_common *priv = dev->priv; mutex_lock(&priv->conf_mutex); /* support all coverage class values as in 802.11-2007 Table 7-27 */ priv->coverage_class = clamp_t(u8, coverage_class, 0, 31); p54_set_edcf(priv); mutex_unlock(&priv->conf_mutex); } static const struct ieee80211_ops p54_ops = { .add_chanctx = ieee80211_emulate_add_chanctx, .remove_chanctx = ieee80211_emulate_remove_chanctx, .change_chanctx = ieee80211_emulate_change_chanctx, .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = p54_tx_80211, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = p54_start, .stop = p54_stop, .add_interface = p54_add_interface, .remove_interface = p54_remove_interface, .set_tim = p54_set_tim, .sta_notify = p54_sta_notify, .sta_add = p54_sta_add_remove, .sta_remove = p54_sta_add_remove, .set_key = p54_set_key, .config = p54_config, .flush = p54_flush, .bss_info_changed = p54_bss_info_changed, .prepare_multicast = p54_prepare_multicast, .configure_filter = p54_configure_filter, .conf_tx = p54_conf_tx, .get_stats = p54_get_stats, .get_survey = p54_get_survey, .set_coverage_class = p54_set_coverage_class, }; struct ieee80211_hw *p54_init_common(size_t priv_data_len) { struct ieee80211_hw *dev; struct p54_common *priv; dev = ieee80211_alloc_hw(priv_data_len, &p54_ops); if (!dev) return NULL; priv = dev->priv; priv->hw = dev; priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->basic_rate_mask = 0x15f; spin_lock_init(&priv->tx_stats_lock); skb_queue_head_init(&priv->tx_queue); skb_queue_head_init(&priv->tx_pending); ieee80211_hw_set(dev, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(dev, MFP_CAPABLE); ieee80211_hw_set(dev, PS_NULLFUNC_STACK); ieee80211_hw_set(dev, SUPPORTS_PS); ieee80211_hw_set(dev, RX_INCLUDES_FCS); ieee80211_hw_set(dev, SIGNAL_DBM); dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT); priv->beacon_req_id = cpu_to_le32(0); priv->tx_stats[P54_QUEUE_BEACON].limit = 1; priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1; priv->tx_stats[P54_QUEUE_MGMT].limit = 3; priv->tx_stats[P54_QUEUE_CAB].limit = 3; priv->tx_stats[P54_QUEUE_DATA].limit = 5; dev->queues = 1; priv->noise = -94; /* * We support at most 8 tries no matter which rate they're at, * we cannot support max_rates * max_rate_tries as we set it * here, but setting it correctly to 4/2 or so would limit us * artificially if the RC algorithm wants just two rates, so * let's say 4/7, we'll redistribute it at TX time, see the * comments there. */ dev->max_rates = 4; dev->max_rate_tries = 7; dev->extra_tx_headroom = sizeof(struct p54_hdr) + 4 + sizeof(struct p54_tx_data); /* * For now, disable PS by default because it affects * link stability significantly. 
*/ dev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; mutex_init(&priv->conf_mutex); mutex_init(&priv->eeprom_mutex); init_completion(&priv->stat_comp); init_completion(&priv->eeprom_comp); init_completion(&priv->beacon_comp); INIT_DELAYED_WORK(&priv->work, p54_work); eth_broadcast_addr(priv->mc_maclist[0]); priv->curchan = NULL; p54_reset_stats(priv); return dev; } EXPORT_SYMBOL_GPL(p54_init_common); int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) { struct p54_common __maybe_unused *priv = dev->priv; int err; err = ieee80211_register_hw(dev); if (err) { dev_err(pdev, "Cannot register device (%d).\n", err); return err; } priv->registered = true; #ifdef CONFIG_P54_LEDS err = p54_init_leds(priv); if (err) { p54_unregister_common(dev); return err; } #endif /* CONFIG_P54_LEDS */ dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); return 0; } EXPORT_SYMBOL_GPL(p54_register_common); void p54_free_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; unsigned int i; for (i = 0; i < NUM_NL80211_BANDS; i++) kfree(priv->band_table[i]); kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); kfree(priv->rssi_db); bitmap_free(priv->used_rxkeys); kfree(priv->survey); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; priv->rssi_db = NULL; priv->used_rxkeys = NULL; priv->survey = NULL; ieee80211_free_hw(dev); } EXPORT_SYMBOL_GPL(p54_free_common); void p54_unregister_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; if (priv->registered) { priv->registered = false; #ifdef CONFIG_P54_LEDS p54_unregister_leds(priv); #endif /* CONFIG_P54_LEDS */ ieee80211_unregister_hw(dev); } mutex_destroy(&priv->conf_mutex); mutex_destroy(&priv->eeprom_mutex); } EXPORT_SYMBOL_GPL(p54_unregister_common); |
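/*
 * Editor's sketch, not part of the original file: p54_init_common(),
 * p54_register_common(), p54_free_common() and p54_unregister_common() are
 * consumed by the bus glue (p54usb, p54pci, p54spi).  The probe path below is
 * a simplified, hypothetical outline of that contract; every example_* name
 * is invented, the ->tx hook is assumed from the other p54 glue drivers, and
 * real glue must also upload firmware and parse the EEPROM before registering.
 */
struct example_glue_priv {
	struct p54_common common;	/* must stay the first member */
	/* bus-specific state would follow here */
};

static int example_open(struct ieee80211_hw *dev) { return 0; }
static void example_stop(struct ieee80211_hw *dev) { }
static void example_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	dev_kfree_skb(skb);		/* real glue hands the frame to the bus */
}

static int example_probe(struct device *pdev)
{
	struct ieee80211_hw *dev;
	struct p54_common *priv;
	int err;

	dev = p54_init_common(sizeof(struct example_glue_priv));
	if (!dev)
		return -ENOMEM;

	priv = dev->priv;
	priv->open = example_open;	/* invoked from p54_start() */
	priv->stop = example_stop;	/* invoked from p54_stop()  */
	priv->tx = example_tx;		/* invoked from the TX path */

	err = p54_register_common(dev, pdev);
	if (err)
		p54_free_common(dev);
	return err;
}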
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright (C) 2022, 2024 Intel Corporation */ #ifndef IEEE80211_RATE_H #define IEEE80211_RATE_H #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/types.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "sta_info.h" #include "driver-ops.h" struct rate_control_ref { const struct rate_control_ops *ops; void *priv; }; void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_tx_rate_control *txrc); void rate_control_tx_status(struct ieee80211_local *local, struct ieee80211_tx_status *st); void rate_control_rate_init(struct link_sta_info *link_sta); void rate_control_rate_init_all_links(struct sta_info *sta); void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct link_sta_info *link_sta, u32 changed); static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, struct sta_info *sta, gfp_t gfp) { spin_lock_init(&sta->rate_ctrl_lock); return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp); } static inline void rate_control_free_sta(struct sta_info *sta) { struct rate_control_ref *ref = sta->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; ref->ops->free_sta(ref->priv, ista, priv_sta); } static inline void rate_control_add_sta_debugfs(struct sta_info *sta) { #ifdef CONFIG_MAC80211_DEBUGFS struct rate_control_ref *ref = sta->rate_ctrl; if (ref && sta->debugfs_dir && ref->ops->add_sta_debugfs) ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, sta->debugfs_dir); #endif } extern const struct debugfs_short_fops rcname_ops; static inline void rate_control_add_debugfs(struct ieee80211_local *local) { #ifdef CONFIG_MAC80211_DEBUGFS struct dentry *debugfsdir; if (!local->rate_ctrl) return; if (!local->rate_ctrl->ops->add_debugfs) return; debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); local->debugfs.rcdir = debugfsdir; debugfs_create_file("name", 0400, debugfsdir, local->rate_ctrl, &rcname_ops); local->rate_ctrl->ops->add_debugfs(&local->hw, local->rate_ctrl->priv, debugfsdir); #endif } void ieee80211_check_rate_mask(struct ieee80211_link_data *link); /* Get a reference to the rate control algorithm. If `name' is NULL, get the * first available algorithm. */ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, const char *name); void rate_control_deinitialize(struct ieee80211_local *local); /* Rate control algorithms */ #ifdef CONFIG_MAC80211_RC_MINSTREL int rc80211_minstrel_init(void); void rc80211_minstrel_exit(void); #else static inline int rc80211_minstrel_init(void) { return 0; } static inline void rc80211_minstrel_exit(void) { } #endif #endif /* IEEE80211_RATE_H */
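/*
 * Editor's sketch, not part of the original header: drivers whose firmware
 * selects TX rates can bypass this rate-control layer by advertising
 * IEEE80211_HW_HAS_RATE_CONTROL before registration, in which case mac80211
 * does not consult rate_control_get_rate() for their frames.  Otherwise a
 * registered algorithm (e.g. minstrel_ht) is attached via
 * ieee80211_init_rate_ctrl_alg() when the hardware is registered.  The
 * function below is an invented fragment of a hypothetical driver's init
 * path, shown only to illustrate the opt-out.
 */
static void example_use_firmware_rate_control(struct ieee80211_hw *hw)
{
	/* Firmware owns rate selection: no rc ops, no per-station rc state. */
	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
}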
// SPDX-License-Identifier: GPL-2.0 // rc-main.c - Remote Controller core module // // Copyright (C) 2009-2010 by Mauro Carvalho Chehab #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <media/rc-core.h> #include <linux/bsearch.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/device.h> #include <linux/module.h> #include "rc-core-priv.h" /* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */ #define IR_TAB_MIN_SIZE 256 #define IR_TAB_MAX_SIZE 8192 static const struct { const char *name; unsigned int repeat_period; unsigned int scancode_bits; } protocols[] = { [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 125 }, [RC_PROTO_OTHER] = { .name =
"other", .repeat_period = 125 }, [RC_PROTO_RC5] = { .name = "rc-5", .scancode_bits = 0x1f7f, .repeat_period = 114 }, [RC_PROTO_RC5X_20] = { .name = "rc-5x-20", .scancode_bits = 0x1f7f3f, .repeat_period = 114 }, [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz", .scancode_bits = 0x2fff, .repeat_period = 114 }, [RC_PROTO_JVC] = { .name = "jvc", .scancode_bits = 0xffff, .repeat_period = 125 }, [RC_PROTO_SONY12] = { .name = "sony-12", .scancode_bits = 0x1f007f, .repeat_period = 100 }, [RC_PROTO_SONY15] = { .name = "sony-15", .scancode_bits = 0xff007f, .repeat_period = 100 }, [RC_PROTO_SONY20] = { .name = "sony-20", .scancode_bits = 0x1fff7f, .repeat_period = 100 }, [RC_PROTO_NEC] = { .name = "nec", .scancode_bits = 0xffff, .repeat_period = 110 }, [RC_PROTO_NECX] = { .name = "nec-x", .scancode_bits = 0xffffff, .repeat_period = 110 }, [RC_PROTO_NEC32] = { .name = "nec-32", .scancode_bits = 0xffffffff, .repeat_period = 110 }, [RC_PROTO_SANYO] = { .name = "sanyo", .scancode_bits = 0x1fffff, .repeat_period = 125 }, [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd", .scancode_bits = 0xffffff, .repeat_period = 100 }, [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse", .scancode_bits = 0x1fffff, .repeat_period = 100 }, [RC_PROTO_RC6_0] = { .name = "rc-6-0", .scancode_bits = 0xffff, .repeat_period = 114 }, [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20", .scancode_bits = 0xfffff, .repeat_period = 114 }, [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24", .scancode_bits = 0xffffff, .repeat_period = 114 }, [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32", .scancode_bits = 0xffffffff, .repeat_period = 114 }, [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce", .scancode_bits = 0xffff7fff, .repeat_period = 114 }, [RC_PROTO_SHARP] = { .name = "sharp", .scancode_bits = 0x1fff, .repeat_period = 125 }, [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 125 }, [RC_PROTO_CEC] = { .name = "cec", .repeat_period = 0 }, [RC_PROTO_IMON] = { .name = "imon", .scancode_bits = 0x7fffffff, .repeat_period = 114 }, [RC_PROTO_RCMM12] = { .name = "rc-mm-12", .scancode_bits = 0x00000fff, .repeat_period = 114 }, [RC_PROTO_RCMM24] = { .name = "rc-mm-24", .scancode_bits = 0x00ffffff, .repeat_period = 114 }, [RC_PROTO_RCMM32] = { .name = "rc-mm-32", .scancode_bits = 0xffffffff, .repeat_period = 114 }, [RC_PROTO_XBOX_DVD] = { .name = "xbox-dvd", .repeat_period = 64 }, }; /* Used to keep track of known keymaps */ static LIST_HEAD(rc_map_list); static DEFINE_SPINLOCK(rc_map_lock); static struct led_trigger *led_feedback; /* Used to keep track of rc devices */ static DEFINE_IDA(rc_ida); static struct rc_map_list *seek_rc_map(const char *name) { struct rc_map_list *map = NULL; spin_lock(&rc_map_lock); list_for_each_entry(map, &rc_map_list, list) { if (!strcmp(name, map->map.name)) { spin_unlock(&rc_map_lock); return map; } } spin_unlock(&rc_map_lock); return NULL; } struct rc_map *rc_map_get(const char *name) { struct rc_map_list *map; map = seek_rc_map(name); #ifdef CONFIG_MODULES if (!map) { int rc = request_module("%s", name); if (rc < 0) { pr_err("Couldn't load IR keymap %s\n", name); return NULL; } msleep(20); /* Give some time for IR to register */ map = seek_rc_map(name); } #endif if (!map) { pr_err("IR keymap %s not found\n", name); return NULL; } printk(KERN_INFO "Registered IR keymap %s\n", map->map.name); return &map->map; } EXPORT_SYMBOL_GPL(rc_map_get); int rc_map_register(struct rc_map_list *map) { spin_lock(&rc_map_lock); list_add_tail(&map->list, &rc_map_list); spin_unlock(&rc_map_lock); return 0; } EXPORT_SYMBOL_GPL(rc_map_register); void 
rc_map_unregister(struct rc_map_list *map) { spin_lock(&rc_map_lock); list_del(&map->list); spin_unlock(&rc_map_lock); } EXPORT_SYMBOL_GPL(rc_map_unregister); static struct rc_map_table empty[] = { { 0x2a, KEY_COFFEE }, }; static struct rc_map_list empty_map = { .map = { .scan = empty, .size = ARRAY_SIZE(empty), .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_EMPTY, } }; /** * scancode_to_u64() - converts scancode in &struct input_keymap_entry * @ke: keymap entry containing scancode to be converted. * @scancode: pointer to the location where converted scancode should * be stored. * * This function is a version of input_scancode_to_scalar specialized for * rc-core. */ static int scancode_to_u64(const struct input_keymap_entry *ke, u64 *scancode) { switch (ke->len) { case 1: *scancode = *((u8 *)ke->scancode); break; case 2: *scancode = *((u16 *)ke->scancode); break; case 4: *scancode = *((u32 *)ke->scancode); break; case 8: *scancode = *((u64 *)ke->scancode); break; default: return -EINVAL; } return 0; } /** * ir_create_table() - initializes a scancode table * @dev: the rc_dev device * @rc_map: the rc_map to initialize * @name: name to assign to the table * @rc_proto: ir type to assign to the new table * @size: initial size of the table * * This routine will initialize the rc_map and will allocate * memory to hold at least the specified number of elements. * * return: zero on success or a negative error code */ static int ir_create_table(struct rc_dev *dev, struct rc_map *rc_map, const char *name, u64 rc_proto, size_t size) { rc_map->name = kstrdup(name, GFP_KERNEL); if (!rc_map->name) return -ENOMEM; rc_map->rc_proto = rc_proto; rc_map->alloc = roundup_pow_of_two(size * sizeof(struct rc_map_table)); rc_map->size = rc_map->alloc / sizeof(struct rc_map_table); rc_map->scan = kmalloc(rc_map->alloc, GFP_KERNEL); if (!rc_map->scan) { kfree(rc_map->name); rc_map->name = NULL; return -ENOMEM; } dev_dbg(&dev->dev, "Allocated space for %u keycode entries (%u bytes)\n", rc_map->size, rc_map->alloc); return 0; } /** * ir_free_table() - frees memory allocated by a scancode table * @rc_map: the table whose mappings need to be freed * * This routine will free memory alloctaed for key mappings used by given * scancode table. */ static void ir_free_table(struct rc_map *rc_map) { rc_map->size = 0; kfree(rc_map->name); rc_map->name = NULL; kfree(rc_map->scan); rc_map->scan = NULL; } /** * ir_resize_table() - resizes a scancode table if necessary * @dev: the rc_dev device * @rc_map: the rc_map to resize * @gfp_flags: gfp flags to use when allocating memory * * This routine will shrink the rc_map if it has lots of * unused entries and grow it if it is full. 
* * return: zero on success or a negative error code */ static int ir_resize_table(struct rc_dev *dev, struct rc_map *rc_map, gfp_t gfp_flags) { unsigned int oldalloc = rc_map->alloc; unsigned int newalloc = oldalloc; struct rc_map_table *oldscan = rc_map->scan; struct rc_map_table *newscan; if (rc_map->size == rc_map->len) { /* All entries in use -> grow keytable */ if (rc_map->alloc >= IR_TAB_MAX_SIZE) return -ENOMEM; newalloc *= 2; dev_dbg(&dev->dev, "Growing table to %u bytes\n", newalloc); } if ((rc_map->len * 3 < rc_map->size) && (oldalloc > IR_TAB_MIN_SIZE)) { /* Less than 1/3 of entries in use -> shrink keytable */ newalloc /= 2; dev_dbg(&dev->dev, "Shrinking table to %u bytes\n", newalloc); } if (newalloc == oldalloc) return 0; newscan = kmalloc(newalloc, gfp_flags); if (!newscan) return -ENOMEM; memcpy(newscan, rc_map->scan, rc_map->len * sizeof(struct rc_map_table)); rc_map->scan = newscan; rc_map->alloc = newalloc; rc_map->size = rc_map->alloc / sizeof(struct rc_map_table); kfree(oldscan); return 0; } /** * ir_update_mapping() - set a keycode in the scancode->keycode table * @dev: the struct rc_dev device descriptor * @rc_map: scancode table to be adjusted * @index: index of the mapping that needs to be updated * @new_keycode: the desired keycode * * This routine is used to update scancode->keycode mapping at given * position. * * return: previous keycode assigned to the mapping * */ static unsigned int ir_update_mapping(struct rc_dev *dev, struct rc_map *rc_map, unsigned int index, unsigned int new_keycode) { int old_keycode = rc_map->scan[index].keycode; int i; /* Did the user wish to remove the mapping? */ if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) { dev_dbg(&dev->dev, "#%d: Deleting scan 0x%04llx\n", index, rc_map->scan[index].scancode); rc_map->len--; memmove(&rc_map->scan[index], &rc_map->scan[index+ 1], (rc_map->len - index) * sizeof(struct rc_map_table)); } else { dev_dbg(&dev->dev, "#%d: %s scan 0x%04llx with key 0x%04x\n", index, old_keycode == KEY_RESERVED ? "New" : "Replacing", rc_map->scan[index].scancode, new_keycode); rc_map->scan[index].keycode = new_keycode; __set_bit(new_keycode, dev->input_dev->keybit); } if (old_keycode != KEY_RESERVED) { /* A previous mapping was updated... */ __clear_bit(old_keycode, dev->input_dev->keybit); /* ... but another scancode might use the same keycode */ for (i = 0; i < rc_map->len; i++) { if (rc_map->scan[i].keycode == old_keycode) { __set_bit(old_keycode, dev->input_dev->keybit); break; } } /* Possibly shrink the keytable, failure is not a problem */ ir_resize_table(dev, rc_map, GFP_ATOMIC); } return old_keycode; } /** * ir_establish_scancode() - set a keycode in the scancode->keycode table * @dev: the struct rc_dev device descriptor * @rc_map: scancode table to be searched * @scancode: the desired scancode * @resize: controls whether we allowed to resize the table to * accommodate not yet present scancodes * * This routine is used to locate given scancode in rc_map. * If scancode is not yet present the routine will allocate a new slot * for it. * * return: index of the mapping containing scancode in question * or -1U in case of failure. */ static unsigned int ir_establish_scancode(struct rc_dev *dev, struct rc_map *rc_map, u64 scancode, bool resize) { unsigned int i; /* * Unfortunately, some hardware-based IR decoders don't provide * all bits for the complete IR code. In general, they provide only * the command part of the IR code. 
Yet, as it is possible to replace * the provided IR with another one, it is needed to allow loading * IR tables from other remotes. So, we support specifying a mask to * indicate the valid bits of the scancodes. */ if (dev->scancode_mask) scancode &= dev->scancode_mask; /* First check if we already have a mapping for this ir command */ for (i = 0; i < rc_map->len; i++) { if (rc_map->scan[i].scancode == scancode) return i; /* Keytable is sorted from lowest to highest scancode */ if (rc_map->scan[i].scancode >= scancode) break; } /* No previous mapping found, we might need to grow the table */ if (rc_map->size == rc_map->len) { if (!resize || ir_resize_table(dev, rc_map, GFP_ATOMIC)) return -1U; } /* i is the proper index to insert our new keycode */ if (i < rc_map->len) memmove(&rc_map->scan[i + 1], &rc_map->scan[i], (rc_map->len - i) * sizeof(struct rc_map_table)); rc_map->scan[i].scancode = scancode; rc_map->scan[i].keycode = KEY_RESERVED; rc_map->len++; return i; } /** * ir_setkeycode() - set a keycode in the scancode->keycode table * @idev: the struct input_dev device descriptor * @ke: Input keymap entry * @old_keycode: result * * This routine is used to handle evdev EVIOCSKEY ioctl. * * return: -EINVAL if the keycode could not be inserted, otherwise zero. */ static int ir_setkeycode(struct input_dev *idev, const struct input_keymap_entry *ke, unsigned int *old_keycode) { struct rc_dev *rdev = input_get_drvdata(idev); struct rc_map *rc_map = &rdev->rc_map; unsigned int index; u64 scancode; int retval = 0; unsigned long flags; spin_lock_irqsave(&rc_map->lock, flags); if (ke->flags & INPUT_KEYMAP_BY_INDEX) { index = ke->index; if (index >= rc_map->len) { retval = -EINVAL; goto out; } } else { retval = scancode_to_u64(ke, &scancode); if (retval) goto out; index = ir_establish_scancode(rdev, rc_map, scancode, true); if (index >= rc_map->len) { retval = -ENOMEM; goto out; } } *old_keycode = ir_update_mapping(rdev, rc_map, index, ke->keycode); out: spin_unlock_irqrestore(&rc_map->lock, flags); return retval; } /** * ir_setkeytable() - sets several entries in the scancode->keycode table * @dev: the struct rc_dev device descriptor * @from: the struct rc_map to copy entries from * * This routine is used to handle table initialization. * * return: -ENOMEM if all keycodes could not be inserted, otherwise zero. */ static int ir_setkeytable(struct rc_dev *dev, const struct rc_map *from) { struct rc_map *rc_map = &dev->rc_map; unsigned int i, index; int rc; rc = ir_create_table(dev, rc_map, from->name, from->rc_proto, from->size); if (rc) return rc; for (i = 0; i < from->size; i++) { index = ir_establish_scancode(dev, rc_map, from->scan[i].scancode, false); if (index >= rc_map->len) { rc = -ENOMEM; break; } ir_update_mapping(dev, rc_map, index, from->scan[i].keycode); } if (rc) ir_free_table(rc_map); return rc; } static int rc_map_cmp(const void *key, const void *elt) { const u64 *scancode = key; const struct rc_map_table *e = elt; if (*scancode < e->scancode) return -1; else if (*scancode > e->scancode) return 1; return 0; } /** * ir_lookup_by_scancode() - locate mapping by scancode * @rc_map: the struct rc_map to search * @scancode: scancode to look for in the table * * This routine performs binary search in RC keykeymap table for * given scancode. 
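 *
 * The keytable is kept sorted by scancode (ir_establish_scancode() inserts
 * new entries in order), which is what makes the binary search valid.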
* * return: index in the table, -1U if not found */ static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map, u64 scancode) { struct rc_map_table *res; res = bsearch(&scancode, rc_map->scan, rc_map->len, sizeof(struct rc_map_table), rc_map_cmp); if (!res) return -1U; else return res - rc_map->scan; } /** * ir_getkeycode() - get a keycode from the scancode->keycode table * @idev: the struct input_dev device descriptor * @ke: Input keymap entry * * This routine is used to handle evdev EVIOCGKEY ioctl. * * return: always returns zero. */ static int ir_getkeycode(struct input_dev *idev, struct input_keymap_entry *ke) { struct rc_dev *rdev = input_get_drvdata(idev); struct rc_map *rc_map = &rdev->rc_map; struct rc_map_table *entry; unsigned long flags; unsigned int index; u64 scancode; int retval; spin_lock_irqsave(&rc_map->lock, flags); if (ke->flags & INPUT_KEYMAP_BY_INDEX) { index = ke->index; } else { retval = scancode_to_u64(ke, &scancode); if (retval) goto out; index = ir_lookup_by_scancode(rc_map, scancode); } if (index < rc_map->len) { entry = &rc_map->scan[index]; ke->index = index; ke->keycode = entry->keycode; ke->len = sizeof(entry->scancode); memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode)); } else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) { /* * We do not really know the valid range of scancodes * so let's respond with KEY_RESERVED to anything we * do not have mapping for [yet]. */ ke->index = index; ke->keycode = KEY_RESERVED; } else { retval = -EINVAL; goto out; } retval = 0; out: spin_unlock_irqrestore(&rc_map->lock, flags); return retval; } /** * rc_g_keycode_from_table() - gets the keycode that corresponds to a scancode * @dev: the struct rc_dev descriptor of the device * @scancode: the scancode to look for * * This routine is used by drivers which need to convert a scancode to a * keycode. Normally it should not be used since drivers should have no * interest in keycodes. * * return: the corresponding keycode, or KEY_RESERVED */ u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode) { struct rc_map *rc_map = &dev->rc_map; unsigned int keycode; unsigned int index; unsigned long flags; spin_lock_irqsave(&rc_map->lock, flags); index = ir_lookup_by_scancode(rc_map, scancode); keycode = index < rc_map->len ? rc_map->scan[index].keycode : KEY_RESERVED; spin_unlock_irqrestore(&rc_map->lock, flags); if (keycode != KEY_RESERVED) dev_dbg(&dev->dev, "%s: scancode 0x%04llx keycode 0x%02x\n", dev->device_name, scancode, keycode); return keycode; } EXPORT_SYMBOL_GPL(rc_g_keycode_from_table); /** * ir_do_keyup() - internal function to signal the release of a keypress * @dev: the struct rc_dev descriptor of the device * @sync: whether or not to call input_sync * * This function is used internally to release a keypress, it must be * called with keylock held. */ static void ir_do_keyup(struct rc_dev *dev, bool sync) { if (!dev->keypressed) return; dev_dbg(&dev->dev, "keyup key 0x%04x\n", dev->last_keycode); timer_delete(&dev->timer_repeat); input_report_key(dev->input_dev, dev->last_keycode, 0); led_trigger_event(led_feedback, LED_OFF); if (sync) input_sync(dev->input_dev); dev->keypressed = false; } /** * rc_keyup() - signals the release of a keypress * @dev: the struct rc_dev descriptor of the device * * This routine is used to signal that a key has been released on the * remote control. 
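 *
 * Keypresses reported with rc_keydown() are released automatically by the
 * keyup timer, so this call is mainly needed by drivers that use
 * rc_keydown_notimeout().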
*/ void rc_keyup(struct rc_dev *dev) { unsigned long flags; spin_lock_irqsave(&dev->keylock, flags); ir_do_keyup(dev, true); spin_unlock_irqrestore(&dev->keylock, flags); } EXPORT_SYMBOL_GPL(rc_keyup); /** * ir_timer_keyup() - generates a keyup event after a timeout * * @t: a pointer to the struct timer_list * * This routine will generate a keyup event some time after a keydown event * is generated when no further activity has been detected. */ static void ir_timer_keyup(struct timer_list *t) { struct rc_dev *dev = timer_container_of(dev, t, timer_keyup); unsigned long flags; /* * ir->keyup_jiffies is used to prevent a race condition if a * hardware interrupt occurs at this point and the keyup timer * event is moved further into the future as a result. * * The timer will then be reactivated and this function called * again in the future. We need to exit gracefully in that case * to allow the input subsystem to do its auto-repeat magic or * a keyup event might follow immediately after the keydown. */ spin_lock_irqsave(&dev->keylock, flags); if (time_is_before_eq_jiffies(dev->keyup_jiffies)) ir_do_keyup(dev, true); spin_unlock_irqrestore(&dev->keylock, flags); } /** * ir_timer_repeat() - generates a repeat event after a timeout * * @t: a pointer to the struct timer_list * * This routine will generate a soft repeat event every REP_PERIOD * milliseconds. */ static void ir_timer_repeat(struct timer_list *t) { struct rc_dev *dev = timer_container_of(dev, t, timer_repeat); struct input_dev *input = dev->input_dev; unsigned long flags; spin_lock_irqsave(&dev->keylock, flags); if (dev->keypressed) { input_event(input, EV_KEY, dev->last_keycode, 2); input_sync(input); if (input->rep[REP_PERIOD]) mod_timer(&dev->timer_repeat, jiffies + msecs_to_jiffies(input->rep[REP_PERIOD])); } spin_unlock_irqrestore(&dev->keylock, flags); } static unsigned int repeat_period(int protocol) { if (protocol >= ARRAY_SIZE(protocols)) return 100; return protocols[protocol].repeat_period; } /** * rc_repeat() - signals that a key is still pressed * @dev: the struct rc_dev descriptor of the device * * This routine is used by IR decoders when a repeat message which does * not include the necessary bits to reproduce the scancode has been * received. */ void rc_repeat(struct rc_dev *dev) { unsigned long flags; unsigned int timeout = usecs_to_jiffies(dev->timeout) + msecs_to_jiffies(repeat_period(dev->last_protocol)); struct lirc_scancode sc = { .scancode = dev->last_scancode, .rc_proto = dev->last_protocol, .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED, .flags = LIRC_SCANCODE_FLAG_REPEAT | (dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0) }; if (dev->allowed_protocols != RC_PROTO_BIT_CEC) lirc_scancode_event(dev, &sc); spin_lock_irqsave(&dev->keylock, flags); if (dev->last_scancode <= U32_MAX) { input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode); input_sync(dev->input_dev); } if (dev->keypressed) { dev->keyup_jiffies = jiffies + timeout; mod_timer(&dev->timer_keyup, dev->keyup_jiffies); } spin_unlock_irqrestore(&dev->keylock, flags); } EXPORT_SYMBOL_GPL(rc_repeat); /** * ir_do_keydown() - internal function to process a keypress * @dev: the struct rc_dev descriptor of the device * @protocol: the protocol of the keypress * @scancode: the scancode of the keypress * @keycode: the keycode of the keypress * @toggle: the toggle value of the keypress * * This function is used internally to register a keypress, it must be * called with keylock held. 
*/ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, u32 keycode, u8 toggle) { bool new_event = (!dev->keypressed || dev->last_protocol != protocol || dev->last_scancode != scancode || dev->last_toggle != toggle); struct lirc_scancode sc = { .scancode = scancode, .rc_proto = protocol, .flags = (toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0) | (!new_event ? LIRC_SCANCODE_FLAG_REPEAT : 0), .keycode = keycode }; if (dev->allowed_protocols != RC_PROTO_BIT_CEC) lirc_scancode_event(dev, &sc); if (new_event && dev->keypressed) ir_do_keyup(dev, false); if (scancode <= U32_MAX) input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode); dev->last_protocol = protocol; dev->last_scancode = scancode; dev->last_toggle = toggle; dev->last_keycode = keycode; if (new_event && keycode != KEY_RESERVED) { /* Register a keypress */ dev->keypressed = true; dev_dbg(&dev->dev, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08llx\n", dev->device_name, keycode, protocol, scancode); input_report_key(dev->input_dev, keycode, 1); led_trigger_event(led_feedback, LED_FULL); } /* * For CEC, start sending repeat messages as soon as the first * repeated message is sent, as long as REP_DELAY = 0 and REP_PERIOD * is non-zero. Otherwise, the input layer will generate repeat * messages. */ if (!new_event && keycode != KEY_RESERVED && dev->allowed_protocols == RC_PROTO_BIT_CEC && !timer_pending(&dev->timer_repeat) && dev->input_dev->rep[REP_PERIOD] && !dev->input_dev->rep[REP_DELAY]) { input_event(dev->input_dev, EV_KEY, keycode, 2); mod_timer(&dev->timer_repeat, jiffies + msecs_to_jiffies(dev->input_dev->rep[REP_PERIOD])); } input_sync(dev->input_dev); } /** * rc_keydown() - generates input event for a key press * @dev: the struct rc_dev descriptor of the device * @protocol: the protocol for the keypress * @scancode: the scancode for the keypress * @toggle: the toggle value (protocol dependent, if the protocol doesn't * support toggle values, this should be set to zero) * * This routine is used to signal that a key has been pressed on the * remote control. */ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, u8 toggle) { unsigned long flags; u32 keycode = rc_g_keycode_from_table(dev, scancode); spin_lock_irqsave(&dev->keylock, flags); ir_do_keydown(dev, protocol, scancode, keycode, toggle); if (dev->keypressed) { dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) + msecs_to_jiffies(repeat_period(protocol)); mod_timer(&dev->timer_keyup, dev->keyup_jiffies); } spin_unlock_irqrestore(&dev->keylock, flags); } EXPORT_SYMBOL_GPL(rc_keydown); /** * rc_keydown_notimeout() - generates input event for a key press without * an automatic keyup event at a later time * @dev: the struct rc_dev descriptor of the device * @protocol: the protocol for the keypress * @scancode: the scancode for the keypress * @toggle: the toggle value (protocol dependent, if the protocol doesn't * support toggle values, this should be set to zero) * * This routine is used to signal that a key has been pressed on the * remote control. The driver must manually call rc_keyup() at a later stage. 
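 *
 * A typical sequence (sketch only) reports the press and the release as the
 * hardware signals them:
 *
 *	rc_keydown_notimeout(dev, protocol, scancode, 0);
 *	...
 *	rc_keyup(dev);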
*/ void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, u8 toggle) { unsigned long flags; u32 keycode = rc_g_keycode_from_table(dev, scancode); spin_lock_irqsave(&dev->keylock, flags); ir_do_keydown(dev, protocol, scancode, keycode, toggle); spin_unlock_irqrestore(&dev->keylock, flags); } EXPORT_SYMBOL_GPL(rc_keydown_notimeout); /** * rc_validate_scancode() - checks that a scancode is valid for a protocol. * For nec, it should do the opposite of ir_nec_bytes_to_scancode() * @proto: protocol * @scancode: scancode */ bool rc_validate_scancode(enum rc_proto proto, u32 scancode) { switch (proto) { /* * NECX has a 16-bit address; if the lower 8 bits match the upper * 8 bits inverted, then the address would match regular nec. */ case RC_PROTO_NECX: if ((((scancode >> 16) ^ ~(scancode >> 8)) & 0xff) == 0) return false; break; /* * NEC32 has a 16 bit address and 16 bit command. If the lower 8 bits * of the command match the upper 8 bits inverted, then it would * be either NEC or NECX. */ case RC_PROTO_NEC32: if ((((scancode >> 8) ^ ~scancode) & 0xff) == 0) return false; break; /* * If the customer code (top 32-bit) is 0x800f, it is MCE else it * is regular mode-6a 32 bit */ case RC_PROTO_RC6_MCE: if ((scancode & 0xffff0000) != 0x800f0000) return false; break; case RC_PROTO_RC6_6A_32: if ((scancode & 0xffff0000) == 0x800f0000) return false; break; default: break; } return true; } /** * rc_validate_filter() - checks that the scancode and mask are valid and * provides sensible defaults * @dev: the struct rc_dev descriptor of the device * @filter: the scancode and mask * * return: 0 or -EINVAL if the filter is not valid */ static int rc_validate_filter(struct rc_dev *dev, struct rc_scancode_filter *filter) { u32 mask, s = filter->data; enum rc_proto protocol = dev->wakeup_protocol; if (protocol >= ARRAY_SIZE(protocols)) return -EINVAL; mask = protocols[protocol].scancode_bits; if (!rc_validate_scancode(protocol, s)) return -EINVAL; filter->data &= mask; filter->mask &= mask; /* * If we have to raw encode the IR for wakeup, we cannot have a mask */ if (dev->encode_wakeup && filter->mask != 0 && filter->mask != mask) return -EINVAL; return 0; } int rc_open(struct rc_dev *rdev) { int rval = 0; if (!rdev) return -EINVAL; mutex_lock(&rdev->lock); if (!rdev->registered) { rval = -ENODEV; } else { if (!rdev->users++ && rdev->open) rval = rdev->open(rdev); if (rval) rdev->users--; } mutex_unlock(&rdev->lock); return rval; } static int ir_open(struct input_dev *idev) { struct rc_dev *rdev = input_get_drvdata(idev); return rc_open(rdev); } void rc_close(struct rc_dev *rdev) { if (rdev) { mutex_lock(&rdev->lock); if (!--rdev->users && rdev->close && rdev->registered) rdev->close(rdev); mutex_unlock(&rdev->lock); } } static void ir_close(struct input_dev *idev) { struct rc_dev *rdev = input_get_drvdata(idev); rc_close(rdev); } /* class for /sys/class/rc */ static char *rc_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev)); } static struct class rc_class = { .name = "rc", .devnode = rc_devnode, }; /* * These are the protocol textual descriptions that are * used by the sysfs protocols file. Note that the order * of the entries is relevant. 
*/ static const struct { u64 type; const char *name; const char *module_name; } proto_names[] = { { RC_PROTO_BIT_NONE, "none", NULL }, { RC_PROTO_BIT_OTHER, "other", NULL }, { RC_PROTO_BIT_UNKNOWN, "unknown", NULL }, { RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20, "rc-5", "ir-rc5-decoder" }, { RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32, "nec", "ir-nec-decoder" }, { RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE, "rc-6", "ir-rc6-decoder" }, { RC_PROTO_BIT_JVC, "jvc", "ir-jvc-decoder" }, { RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20, "sony", "ir-sony-decoder" }, { RC_PROTO_BIT_RC5_SZ, "rc-5-sz", "ir-rc5-decoder" }, { RC_PROTO_BIT_SANYO, "sanyo", "ir-sanyo-decoder" }, { RC_PROTO_BIT_SHARP, "sharp", "ir-sharp-decoder" }, { RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE, "mce_kbd", "ir-mce_kbd-decoder" }, { RC_PROTO_BIT_XMP, "xmp", "ir-xmp-decoder" }, { RC_PROTO_BIT_CEC, "cec", NULL }, { RC_PROTO_BIT_IMON, "imon", "ir-imon-decoder" }, { RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32, "rc-mm", "ir-rcmm-decoder" }, { RC_PROTO_BIT_XBOX_DVD, "xbox-dvd", NULL }, }; /** * struct rc_filter_attribute - Device attribute relating to a filter type. * @attr: Device attribute. * @type: Filter type. * @mask: false for filter value, true for filter mask. */ struct rc_filter_attribute { struct device_attribute attr; enum rc_filter_type type; bool mask; }; #define to_rc_filter_attr(a) container_of(a, struct rc_filter_attribute, attr) #define RC_FILTER_ATTR(_name, _mode, _show, _store, _type, _mask) \ struct rc_filter_attribute dev_attr_##_name = { \ .attr = __ATTR(_name, _mode, _show, _store), \ .type = (_type), \ .mask = (_mask), \ } /** * show_protocols() - shows the current IR protocol(s) * @device: the device descriptor * @mattr: the device attribute struct * @buf: a pointer to the output buffer * * This routine is a callback routine for input read the IR protocol type(s). * it is triggered by reading /sys/class/rc/rc?/protocols. * It returns the protocol names of supported protocols. * Enabled protocols are printed in brackets. * * dev->lock is taken to guard against races between * store_protocols and show_protocols. */ static ssize_t show_protocols(struct device *device, struct device_attribute *mattr, char *buf) { struct rc_dev *dev = to_rc_dev(device); u64 allowed, enabled; char *tmp = buf; int i; mutex_lock(&dev->lock); enabled = dev->enabled_protocols; allowed = dev->allowed_protocols; if (dev->raw && !allowed) allowed = ir_raw_get_allowed_protocols(); mutex_unlock(&dev->lock); dev_dbg(&dev->dev, "%s: allowed - 0x%llx, enabled - 0x%llx\n", __func__, (long long)allowed, (long long)enabled); for (i = 0; i < ARRAY_SIZE(proto_names); i++) { if (allowed & enabled & proto_names[i].type) tmp += sprintf(tmp, "[%s] ", proto_names[i].name); else if (allowed & proto_names[i].type) tmp += sprintf(tmp, "%s ", proto_names[i].name); if (allowed & proto_names[i].type) allowed &= ~proto_names[i].type; } #ifdef CONFIG_LIRC if (dev->driver_type == RC_DRIVER_IR_RAW) tmp += sprintf(tmp, "[lirc] "); #endif if (tmp != buf) tmp--; *tmp = '\n'; return tmp + 1 - buf; } /** * parse_protocol_change() - parses a protocol change request * @dev: rc_dev device * @protocols: pointer to the bitmask of current protocols * @buf: pointer to the buffer with a list of changes * * Writing "+proto" will add a protocol to the protocol mask. * Writing "-proto" will remove a protocol from protocol mask. 
* Writing "proto" will enable only "proto". * Writing "none" will disable all protocols. * Returns the number of changes performed or a negative error code. */ static int parse_protocol_change(struct rc_dev *dev, u64 *protocols, const char *buf) { const char *tmp; unsigned count = 0; bool enable, disable; u64 mask; int i; while ((tmp = strsep((char **)&buf, " \n")) != NULL) { if (!*tmp) break; if (*tmp == '+') { enable = true; disable = false; tmp++; } else if (*tmp == '-') { enable = false; disable = true; tmp++; } else { enable = false; disable = false; } for (i = 0; i < ARRAY_SIZE(proto_names); i++) { if (!strcasecmp(tmp, proto_names[i].name)) { mask = proto_names[i].type; break; } } if (i == ARRAY_SIZE(proto_names)) { if (!strcasecmp(tmp, "lirc")) mask = 0; else { dev_dbg(&dev->dev, "Unknown protocol: '%s'\n", tmp); return -EINVAL; } } count++; if (enable) *protocols |= mask; else if (disable) *protocols &= ~mask; else *protocols = mask; } if (!count) { dev_dbg(&dev->dev, "Protocol not specified\n"); return -EINVAL; } return count; } void ir_raw_load_modules(u64 *protocols) { u64 available; int i, ret; for (i = 0; i < ARRAY_SIZE(proto_names); i++) { if (proto_names[i].type == RC_PROTO_BIT_NONE || proto_names[i].type & (RC_PROTO_BIT_OTHER | RC_PROTO_BIT_UNKNOWN)) continue; available = ir_raw_get_allowed_protocols(); if (!(*protocols & proto_names[i].type & ~available)) continue; if (!proto_names[i].module_name) { pr_err("Can't enable IR protocol %s\n", proto_names[i].name); *protocols &= ~proto_names[i].type; continue; } ret = request_module("%s", proto_names[i].module_name); if (ret < 0) { pr_err("Couldn't load IR protocol module %s\n", proto_names[i].module_name); *protocols &= ~proto_names[i].type; continue; } msleep(20); available = ir_raw_get_allowed_protocols(); if (!(*protocols & proto_names[i].type & ~available)) continue; pr_err("Loaded IR protocol module %s, but protocol %s still not available\n", proto_names[i].module_name, proto_names[i].name); *protocols &= ~proto_names[i].type; } } /** * store_protocols() - changes the current/wakeup IR protocol(s) * @device: the device descriptor * @mattr: the device attribute struct * @buf: a pointer to the input buffer * @len: length of the input buffer * * This routine is for changing the IR protocol type. * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]protocols. * See parse_protocol_change() for the valid commands. * Returns @len on success or a negative error code. * * dev->lock is taken to guard against races between * store_protocols and show_protocols. 
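 *
 * For example, writing "+nec -rc-5" enables the NEC decoders and disables
 * RC-5, while writing "none" disables all protocols.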
*/ static ssize_t store_protocols(struct device *device, struct device_attribute *mattr, const char *buf, size_t len) { struct rc_dev *dev = to_rc_dev(device); u64 *current_protocols; struct rc_scancode_filter *filter; u64 old_protocols, new_protocols; ssize_t rc; dev_dbg(&dev->dev, "Normal protocol change requested\n"); current_protocols = &dev->enabled_protocols; filter = &dev->scancode_filter; if (!dev->change_protocol) { dev_dbg(&dev->dev, "Protocol switching not supported\n"); return -EINVAL; } mutex_lock(&dev->lock); if (!dev->registered) { mutex_unlock(&dev->lock); return -ENODEV; } old_protocols = *current_protocols; new_protocols = old_protocols; rc = parse_protocol_change(dev, &new_protocols, buf); if (rc < 0) goto out; if (dev->driver_type == RC_DRIVER_IR_RAW) ir_raw_load_modules(&new_protocols); rc = dev->change_protocol(dev, &new_protocols); if (rc < 0) { dev_dbg(&dev->dev, "Error setting protocols to 0x%llx\n", (long long)new_protocols); goto out; } if (new_protocols != old_protocols) { *current_protocols = new_protocols; dev_dbg(&dev->dev, "Protocols changed to 0x%llx\n", (long long)new_protocols); } /* * If a protocol change was attempted the filter may need updating, even * if the actual protocol mask hasn't changed (since the driver may have * cleared the filter). * Try setting the same filter with the new protocol (if any). * Fall back to clearing the filter. */ if (dev->s_filter && filter->mask) { if (new_protocols) rc = dev->s_filter(dev, filter); else rc = -1; if (rc < 0) { filter->data = 0; filter->mask = 0; dev->s_filter(dev, filter); } } rc = len; out: mutex_unlock(&dev->lock); return rc; } /** * show_filter() - shows the current scancode filter value or mask * @device: the device descriptor * @attr: the device attribute struct * @buf: a pointer to the output buffer * * This routine is a callback routine to read a scancode filter value or mask. * It is triggered by reading /sys/class/rc/rc?/[wakeup_]filter[_mask]. * It prints the current scancode filter value or mask of the appropriate filter * type in hexadecimal into @buf and returns the size of the buffer. * * Bits of the filter value corresponding to set bits in the filter mask are * compared against input scancodes and non-matching scancodes are discarded. * * dev->lock is taken to guard against races between * store_filter and show_filter. */ static ssize_t show_filter(struct device *device, struct device_attribute *attr, char *buf) { struct rc_dev *dev = to_rc_dev(device); struct rc_filter_attribute *fattr = to_rc_filter_attr(attr); struct rc_scancode_filter *filter; u32 val; mutex_lock(&dev->lock); if (fattr->type == RC_FILTER_NORMAL) filter = &dev->scancode_filter; else filter = &dev->scancode_wakeup_filter; if (fattr->mask) val = filter->mask; else val = filter->data; mutex_unlock(&dev->lock); return sprintf(buf, "%#x\n", val); } /** * store_filter() - changes the scancode filter value * @device: the device descriptor * @attr: the device attribute struct * @buf: a pointer to the input buffer * @len: length of the input buffer * * This routine is for changing a scancode filter value or mask. * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask]. * Returns -EINVAL if an invalid filter value for the current protocol was * specified or if scancode filtering is not supported by the driver, otherwise * returns @len. * * Bits of the filter value corresponding to set bits in the filter mask are * compared against input scancodes and non-matching scancodes are discarded. 
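 *
 * For example, writing 0xff00 to filter_mask and 0x4000 to filter accepts
 * only scancodes whose bits 8-15 equal 0x40.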
* * dev->lock is taken to guard against races between * store_filter and show_filter. */ static ssize_t store_filter(struct device *device, struct device_attribute *attr, const char *buf, size_t len) { struct rc_dev *dev = to_rc_dev(device); struct rc_filter_attribute *fattr = to_rc_filter_attr(attr); struct rc_scancode_filter new_filter, *filter; int ret; unsigned long val; int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter); ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; if (fattr->type == RC_FILTER_NORMAL) { set_filter = dev->s_filter; filter = &dev->scancode_filter; } else { set_filter = dev->s_wakeup_filter; filter = &dev->scancode_wakeup_filter; } if (!set_filter) return -EINVAL; mutex_lock(&dev->lock); if (!dev->registered) { mutex_unlock(&dev->lock); return -ENODEV; } new_filter = *filter; if (fattr->mask) new_filter.mask = val; else new_filter.data = val; if (fattr->type == RC_FILTER_WAKEUP) { /* * Refuse to set a filter unless a protocol is enabled * and the filter is valid for that protocol */ if (dev->wakeup_protocol != RC_PROTO_UNKNOWN) ret = rc_validate_filter(dev, &new_filter); else ret = -EINVAL; if (ret != 0) goto unlock; } if (fattr->type == RC_FILTER_NORMAL && !dev->enabled_protocols && val) { /* refuse to set a filter unless a protocol is enabled */ ret = -EINVAL; goto unlock; } ret = set_filter(dev, &new_filter); if (ret < 0) goto unlock; *filter = new_filter; unlock: mutex_unlock(&dev->lock); return (ret < 0) ? ret : len; } /** * show_wakeup_protocols() - shows the wakeup IR protocol * @device: the device descriptor * @mattr: the device attribute struct * @buf: a pointer to the output buffer * * This routine is a callback routine for input read the IR protocol type(s). * it is triggered by reading /sys/class/rc/rc?/wakeup_protocols. * It returns the protocol names of supported protocols. * The enabled protocols are printed in brackets. * * dev->lock is taken to guard against races between * store_wakeup_protocols and show_wakeup_protocols. */ static ssize_t show_wakeup_protocols(struct device *device, struct device_attribute *mattr, char *buf) { struct rc_dev *dev = to_rc_dev(device); u64 allowed; enum rc_proto enabled; char *tmp = buf; int i; mutex_lock(&dev->lock); allowed = dev->allowed_wakeup_protocols; enabled = dev->wakeup_protocol; mutex_unlock(&dev->lock); dev_dbg(&dev->dev, "%s: allowed - 0x%llx, enabled - %d\n", __func__, (long long)allowed, enabled); for (i = 0; i < ARRAY_SIZE(protocols); i++) { if (allowed & (1ULL << i)) { if (i == enabled) tmp += sprintf(tmp, "[%s] ", protocols[i].name); else tmp += sprintf(tmp, "%s ", protocols[i].name); } } if (tmp != buf) tmp--; *tmp = '\n'; return tmp + 1 - buf; } /** * store_wakeup_protocols() - changes the wakeup IR protocol(s) * @device: the device descriptor * @mattr: the device attribute struct * @buf: a pointer to the input buffer * @len: length of the input buffer * * This routine is for changing the IR protocol type. * It is triggered by writing to /sys/class/rc/rc?/wakeup_protocols. * Returns @len on success or a negative error code. * * dev->lock is taken to guard against races between * store_wakeup_protocols and show_wakeup_protocols. 
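 *
 * Only a single protocol name (one of those listed by wakeup_protocols) or
 * "none" is accepted, e.g. writing "nec" selects NEC wakeup decoding.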
*/ static ssize_t store_wakeup_protocols(struct device *device, struct device_attribute *mattr, const char *buf, size_t len) { struct rc_dev *dev = to_rc_dev(device); enum rc_proto protocol = RC_PROTO_UNKNOWN; ssize_t rc; u64 allowed; int i; mutex_lock(&dev->lock); if (!dev->registered) { mutex_unlock(&dev->lock); return -ENODEV; } allowed = dev->allowed_wakeup_protocols; if (!sysfs_streq(buf, "none")) { for (i = 0; i < ARRAY_SIZE(protocols); i++) { if ((allowed & (1ULL << i)) && sysfs_streq(buf, protocols[i].name)) { protocol = i; break; } } if (i == ARRAY_SIZE(protocols)) { rc = -EINVAL; goto out; } if (dev->encode_wakeup) { u64 mask = 1ULL << protocol; ir_raw_load_modules(&mask); if (!mask) { rc = -EINVAL; goto out; } } } if (dev->wakeup_protocol != protocol) { dev->wakeup_protocol = protocol; dev_dbg(&dev->dev, "Wakeup protocol changed to %d\n", protocol); if (protocol == RC_PROTO_RC6_MCE) dev->scancode_wakeup_filter.data = 0x800f0000; else dev->scancode_wakeup_filter.data = 0; dev->scancode_wakeup_filter.mask = 0; rc = dev->s_wakeup_filter(dev, &dev->scancode_wakeup_filter); if (rc == 0) rc = len; } else { rc = len; } out: mutex_unlock(&dev->lock); return rc; } static void rc_dev_release(struct device *device) { struct rc_dev *dev = to_rc_dev(device); kfree(dev); } static int rc_dev_uevent(const struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); int ret = 0; mutex_lock(&dev->lock); if (!dev->registered) ret = -ENODEV; if (ret == 0 && dev->rc_map.name) ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name); if (ret == 0 && dev->driver_name) ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name); if (ret == 0 && dev->device_name) ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name); mutex_unlock(&dev->lock); return ret; } /* * Static device attribute struct with the sysfs attributes for IR's */ static struct device_attribute dev_attr_ro_protocols = __ATTR(protocols, 0444, show_protocols, NULL); static struct device_attribute dev_attr_rw_protocols = __ATTR(protocols, 0644, show_protocols, store_protocols); static DEVICE_ATTR(wakeup_protocols, 0644, show_wakeup_protocols, store_wakeup_protocols); static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR, show_filter, store_filter, RC_FILTER_NORMAL, false); static RC_FILTER_ATTR(filter_mask, S_IRUGO|S_IWUSR, show_filter, store_filter, RC_FILTER_NORMAL, true); static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR, show_filter, store_filter, RC_FILTER_WAKEUP, false); static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR, show_filter, store_filter, RC_FILTER_WAKEUP, true); static struct attribute *rc_dev_rw_protocol_attrs[] = { &dev_attr_rw_protocols.attr, NULL, }; static const struct attribute_group rc_dev_rw_protocol_attr_grp = { .attrs = rc_dev_rw_protocol_attrs, }; static struct attribute *rc_dev_ro_protocol_attrs[] = { &dev_attr_ro_protocols.attr, NULL, }; static const struct attribute_group rc_dev_ro_protocol_attr_grp = { .attrs = rc_dev_ro_protocol_attrs, }; static struct attribute *rc_dev_filter_attrs[] = { &dev_attr_filter.attr.attr, &dev_attr_filter_mask.attr.attr, NULL, }; static const struct attribute_group rc_dev_filter_attr_grp = { .attrs = rc_dev_filter_attrs, }; static struct attribute *rc_dev_wakeup_filter_attrs[] = { &dev_attr_wakeup_filter.attr.attr, &dev_attr_wakeup_filter_mask.attr.attr, &dev_attr_wakeup_protocols.attr, NULL, }; static const struct attribute_group rc_dev_wakeup_filter_attr_grp = { .attrs = rc_dev_wakeup_filter_attrs, }; static const struct device_type 
rc_dev_type = { .release = rc_dev_release, .uevent = rc_dev_uevent, }; struct rc_dev *rc_allocate_device(enum rc_driver_type type) { struct rc_dev *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; if (type != RC_DRIVER_IR_RAW_TX) { dev->input_dev = input_allocate_device(); if (!dev->input_dev) { kfree(dev); return NULL; } dev->input_dev->getkeycode = ir_getkeycode; dev->input_dev->setkeycode = ir_setkeycode; input_set_drvdata(dev->input_dev, dev); dev->timeout = IR_DEFAULT_TIMEOUT; timer_setup(&dev->timer_keyup, ir_timer_keyup, 0); timer_setup(&dev->timer_repeat, ir_timer_repeat, 0); spin_lock_init(&dev->rc_map.lock); spin_lock_init(&dev->keylock); } mutex_init(&dev->lock); dev->dev.type = &rc_dev_type; dev->dev.class = &rc_class; device_initialize(&dev->dev); dev->driver_type = type; __module_get(THIS_MODULE); return dev; } EXPORT_SYMBOL_GPL(rc_allocate_device); void rc_free_device(struct rc_dev *dev) { if (!dev) return; input_free_device(dev->input_dev); put_device(&dev->dev); /* kfree(dev) will be called by the callback function rc_dev_release() */ module_put(THIS_MODULE); } EXPORT_SYMBOL_GPL(rc_free_device); static void devm_rc_alloc_release(struct device *dev, void *res) { rc_free_device(*(struct rc_dev **)res); } struct rc_dev *devm_rc_allocate_device(struct device *dev, enum rc_driver_type type) { struct rc_dev **dr, *rc; dr = devres_alloc(devm_rc_alloc_release, sizeof(*dr), GFP_KERNEL); if (!dr) return NULL; rc = rc_allocate_device(type); if (!rc) { devres_free(dr); return NULL; } rc->dev.parent = dev; rc->managed_alloc = true; *dr = rc; devres_add(dev, dr); return rc; } EXPORT_SYMBOL_GPL(devm_rc_allocate_device); static int rc_prepare_rx_device(struct rc_dev *dev) { int rc; struct rc_map *rc_map; u64 rc_proto; if (!dev->map_name) return -EINVAL; rc_map = rc_map_get(dev->map_name); if (!rc_map) rc_map = rc_map_get(RC_MAP_EMPTY); if (!rc_map || !rc_map->scan || rc_map->size == 0) return -EINVAL; rc = ir_setkeytable(dev, rc_map); if (rc) return rc; rc_proto = BIT_ULL(rc_map->rc_proto); if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol) dev->enabled_protocols = dev->allowed_protocols; if (dev->driver_type == RC_DRIVER_IR_RAW) ir_raw_load_modules(&rc_proto); if (dev->change_protocol) { rc = dev->change_protocol(dev, &rc_proto); if (rc < 0) goto out_table; dev->enabled_protocols = rc_proto; } /* Keyboard events */ set_bit(EV_KEY, dev->input_dev->evbit); set_bit(EV_REP, dev->input_dev->evbit); set_bit(EV_MSC, dev->input_dev->evbit); set_bit(MSC_SCAN, dev->input_dev->mscbit); /* Pointer/mouse events */ set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit); set_bit(EV_REL, dev->input_dev->evbit); set_bit(REL_X, dev->input_dev->relbit); set_bit(REL_Y, dev->input_dev->relbit); if (dev->open) dev->input_dev->open = ir_open; if (dev->close) dev->input_dev->close = ir_close; dev->input_dev->dev.parent = &dev->dev; memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id)); dev->input_dev->phys = dev->input_phys; dev->input_dev->name = dev->device_name; return 0; out_table: ir_free_table(&dev->rc_map); return rc; } static int rc_setup_rx_device(struct rc_dev *dev) { int rc; /* rc_open will be called here */ rc = input_register_device(dev->input_dev); if (rc) return rc; /* * Default delay of 250ms is too short for some protocols, especially * since the timeout is currently set to 250ms. Increase it to 500ms, * to avoid wrong repetition of the keycodes. Note that this must be * set after the call to input_register_device(). 
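 * (If rep[] were already set before registration, the input core would
 * assume the driver implements autorepeat itself and would not enable its
 * soft-repeat handling.)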
*/ if (dev->allowed_protocols == RC_PROTO_BIT_CEC) dev->input_dev->rep[REP_DELAY] = 0; else dev->input_dev->rep[REP_DELAY] = 500; /* * As a repeat event on protocols like RC-5 and NEC take as long as * 110/114ms, using 33ms as a repeat period is not the right thing * to do. */ dev->input_dev->rep[REP_PERIOD] = 125; return 0; } static void rc_free_rx_device(struct rc_dev *dev) { if (!dev) return; if (dev->input_dev) { input_unregister_device(dev->input_dev); dev->input_dev = NULL; } ir_free_table(&dev->rc_map); } int rc_register_device(struct rc_dev *dev) { const char *path; int attr = 0; int minor; int rc; if (!dev) return -EINVAL; minor = ida_alloc_max(&rc_ida, RC_DEV_MAX - 1, GFP_KERNEL); if (minor < 0) return minor; dev->minor = minor; dev_set_name(&dev->dev, "rc%u", dev->minor); dev_set_drvdata(&dev->dev, dev); dev->dev.groups = dev->sysfs_groups; if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol) dev->sysfs_groups[attr++] = &rc_dev_ro_protocol_attr_grp; else if (dev->driver_type != RC_DRIVER_IR_RAW_TX) dev->sysfs_groups[attr++] = &rc_dev_rw_protocol_attr_grp; if (dev->s_filter) dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp; if (dev->s_wakeup_filter) dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp; dev->sysfs_groups[attr++] = NULL; if (dev->driver_type == RC_DRIVER_IR_RAW) { rc = ir_raw_event_prepare(dev); if (rc < 0) goto out_minor; } if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { rc = rc_prepare_rx_device(dev); if (rc) goto out_raw; } dev->registered = true; rc = device_add(&dev->dev); if (rc) goto out_rx_free; path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); dev_info(&dev->dev, "%s as %s\n", dev->device_name ?: "Unspecified device", path ?: "N/A"); kfree(path); /* * once the input device is registered in rc_setup_rx_device, * userspace can open the input device and rc_open() will be called * as a result. This results in driver code being allowed to submit * keycodes with rc_keydown, so lirc must be registered first. */ if (dev->allowed_protocols != RC_PROTO_BIT_CEC) { rc = lirc_register(dev); if (rc < 0) goto out_dev; } if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { rc = rc_setup_rx_device(dev); if (rc) goto out_lirc; } if (dev->driver_type == RC_DRIVER_IR_RAW) { rc = ir_raw_event_register(dev); if (rc < 0) goto out_rx; } dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor, dev->driver_name ? 
dev->driver_name : "unknown"); return 0; out_rx: rc_free_rx_device(dev); out_lirc: if (dev->allowed_protocols != RC_PROTO_BIT_CEC) lirc_unregister(dev); out_dev: device_del(&dev->dev); out_rx_free: ir_free_table(&dev->rc_map); out_raw: ir_raw_event_free(dev); out_minor: ida_free(&rc_ida, minor); return rc; } EXPORT_SYMBOL_GPL(rc_register_device); static void devm_rc_release(struct device *dev, void *res) { rc_unregister_device(*(struct rc_dev **)res); } int devm_rc_register_device(struct device *parent, struct rc_dev *dev) { struct rc_dev **dr; int ret; dr = devres_alloc(devm_rc_release, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; ret = rc_register_device(dev); if (ret) { devres_free(dr); return ret; } *dr = dev; devres_add(parent, dr); return 0; } EXPORT_SYMBOL_GPL(devm_rc_register_device); void rc_unregister_device(struct rc_dev *dev) { if (!dev) return; if (dev->driver_type == RC_DRIVER_IR_RAW) ir_raw_event_unregister(dev); timer_delete_sync(&dev->timer_keyup); timer_delete_sync(&dev->timer_repeat); mutex_lock(&dev->lock); if (dev->users && dev->close) dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); rc_free_rx_device(dev); /* * lirc device should be freed with dev->registered = false, so * that userspace polling will get notified. */ if (dev->allowed_protocols != RC_PROTO_BIT_CEC) lirc_unregister(dev); device_del(&dev->dev); ida_free(&rc_ida, dev->minor); if (!dev->managed_alloc) rc_free_device(dev); } EXPORT_SYMBOL_GPL(rc_unregister_device); /* * Init/exit code for the module. Basically, creates/removes /sys/class/rc */ static int __init rc_core_init(void) { int rc = class_register(&rc_class); if (rc) { pr_err("rc_core: unable to register rc class\n"); return rc; } rc = lirc_dev_init(); if (rc) { pr_err("rc_core: unable to init lirc\n"); class_unregister(&rc_class); return rc; } led_trigger_register_simple("rc-feedback", &led_feedback); rc_map_register(&empty_map); #ifdef CONFIG_MEDIA_CEC_RC rc_map_register(&cec_map); #endif return 0; } static void __exit rc_core_exit(void) { lirc_dev_exit(); class_unregister(&rc_class); led_trigger_unregister_simple(led_feedback); #ifdef CONFIG_MEDIA_CEC_RC rc_map_unregister(&cec_map); #endif rc_map_unregister(&empty_map); } subsys_initcall(rc_core_init); module_exit(rc_core_exit); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_DESCRIPTION("Remote Controller core module"); MODULE_LICENSE("GPL v2"); |
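/*
 * Illustrative sketch (not part of rc-core): the typical call sequence a
 * scancode-style driver uses against the API exported above. The names
 * example_hw, example_probe, example_report and example_remove, the device
 * and driver name strings, and the protocol/keymap choices are hypothetical
 * placeholders; only the rc_* calls and struct rc_dev fields come from this
 * file. Drivers may instead use devm_rc_allocate_device() and
 * devm_rc_register_device() to tie the lifetime to a parent device.
 */
#if 0	/* example only -- never compiled */
#include <media/rc-core.h>

struct example_hw {			/* hypothetical driver state */
	struct rc_dev *rc;
};

static int example_probe(struct example_hw *hw)
{
	struct rc_dev *rc;
	int ret;

	rc = rc_allocate_device(RC_DRIVER_SCANCODE);
	if (!rc)
		return -ENOMEM;

	rc->device_name = "Example IR receiver";	/* placeholder */
	rc->driver_name = "example-rc";			/* placeholder */
	rc->map_name = RC_MAP_EMPTY;			/* real drivers name a keymap */
	rc->allowed_protocols = RC_PROTO_BIT_NEC;
	/* rc->dev.parent and the open/close callbacks would be set here */

	ret = rc_register_device(rc);		/* creates /sys/class/rc/rcN */
	if (ret) {
		rc_free_device(rc);
		return ret;
	}
	hw->rc = rc;
	return 0;
}

/* Called when the hardware has decoded a scancode. */
static void example_report(struct example_hw *hw, u32 scancode)
{
	/* rc_keydown() looks up the keycode and arms the keyup timer itself */
	rc_keydown(hw->rc, RC_PROTO_NEC, scancode, 0);
}

static void example_remove(struct example_hw *hw)
{
	rc_unregister_device(hw->rc);	/* also frees a non-devm allocation */
}
#endif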
// SPDX-License-Identifier: GPL-2.0+ /* * Edgeport USB Serial Converter driver * * Copyright (C) 2000 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * * Supports the following devices: * Edgeport/4 * Edgeport/4t * Edgeport/2 * Edgeport/4i * Edgeport/2i * Edgeport/421 * Edgeport/21 * Rapidport/4 * Edgeport/8 * Edgeport/2D8 * Edgeport/4D8 * Edgeport/8i * * For questions or problems with this driver, contact Inside Out * Networks technical support, or Peter Berger <pberger@brimson.com>, * or Al Borchers <alborchers@steinerpoint.com>. * */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/serial.h> #include <linux/ioctl.h> #include <linux/wait.h> #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "io_edgeport.h" #include "io_ionsp.h" /* info for the iosp messages */ #include "io_16654.h" /* 16654 UART defines */ #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli" #define DRIVER_DESC "Edgeport USB Serial Driver" #define MAX_NAME_LEN 64 #define OPEN_TIMEOUT (5*HZ) /* 5 seconds */ static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_21) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2_DIN) }, { } }; static const struct usb_device_id edgeport_4port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_MT4X56USB) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4_DIN) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_22I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_COMPATIBLE) }, { } }; static const struct usb_device_id edgeport_8port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) }, { } }; static const struct usb_device_id Epic_port_id_table[] = { {
USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) }, { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) }, { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) }, { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) }, { } }; /* Devices that this driver supports */ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_MT4X56USB) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_21) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2_DIN) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4_DIN) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_22I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_COMPATIBLE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) }, { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) }, { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) }, { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); /* receive port state */ enum RXSTATE { EXPECT_HDR1 = 0, /* Expect header byte 1 */ EXPECT_HDR2 = 1, /* Expect header byte 2 */ EXPECT_DATA = 2, /* Expect 'RxBytesRemaining' data */ EXPECT_HDR3 = 3, /* Expect header byte 3 (for status hdrs only) */ }; /* Transmit Fifo * This Transmit queue is an extension of the edgeport Rx buffer. * The maximum amount of data buffered in both the edgeport * Rx buffer (maxTxCredits) and this buffer will never exceed maxTxCredits. 
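 * (The fifo below is used as a simple ring buffer: 'head' is the write
 * index filled by edge_write(), 'tail' is the read index drained by
 * send_more_port_data(), and 'count' tracks the bytes currently queued;
 * 'size' is set to maxTxCredits when the port is opened.)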
*/ struct TxFifo { unsigned int head; /* index to head pointer (write) */ unsigned int tail; /* index to tail pointer (read) */ unsigned int count; /* Bytes in queue */ unsigned int size; /* Max size of queue (equal to Max number of TxCredits) */ unsigned char *fifo; /* allocated Buffer */ }; /* This structure holds all of the local port information */ struct edgeport_port { __u16 txCredits; /* our current credits for this port */ __u16 maxTxCredits; /* the max size of the port */ struct TxFifo txfifo; /* transmit fifo -- size will be maxTxCredits */ struct urb *write_urb; /* write URB for this port */ bool write_in_progress; /* 'true' while a write URB is outstanding */ spinlock_t ep_lock; __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ __u8 shadowMSR; /* last MSR value received */ __u8 shadowLSR; /* last LSR value received */ __u8 shadowXonChar; /* last value set as XON char in Edgeport */ __u8 shadowXoffChar; /* last value set as XOFF char in Edgeport */ __u8 validDataMask; __u32 baudRate; bool open; bool openPending; bool commandPending; bool closePending; bool chaseResponsePending; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ struct usb_serial_port *port; /* loop back to the owner of this object */ }; /* This structure holds all of the individual device information */ struct edgeport_serial { char name[MAX_NAME_LEN+2]; /* string name of this device */ struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */ struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */ struct edgeport_product_info product_info; /* Product Info */ struct edge_compatibility_descriptor epic_descriptor; /* Edgeport compatible descriptor */ int is_epic; /* flag if EPiC device or not */ __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ struct urb *interrupt_read_urb; /* our interrupt urb */ __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ struct urb *read_urb; /* our bulk read urb */ bool read_in_progress; spinlock_t es_lock; __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ __s16 rxBytesAvail; /* the number of bytes that we need to read from this device */ enum RXSTATE rxState; /* the current state of the bulk receive processor */ __u8 rxHeader1; /* receive header byte 1 */ __u8 rxHeader2; /* receive header byte 2 */ __u8 rxHeader3; /* receive header byte 3 */ __u8 rxPort; /* the port that we are currently receiving data for */ __u8 rxStatusCode; /* the receive status code */ __u8 rxStatusParam; /* the receive status parameter */ __s16 rxBytesRemaining; /* the number of port bytes left to read */ struct usb_serial *serial; /* loop back to the owner of this object */ }; /* baud rate information */ struct divisor_table_entry { __u32 BaudRate; __u16 Divisor; }; /* * Define table of divisors for Rev A EdgePort/4 hardware * These assume a 3.6864MHz crystal, the standard /16, and * MCR.7 = 0. 
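 * With a 3.6864 MHz crystal and the /16 prescale the base rate is
 * 3686400 / 16 = 230400, so each divisor below is simply 230400 / baud
 * (for example 230400 / 9600 = 24 and 230400 / 115200 = 2); rates that
 * do not divide evenly are approximated, as the inline comments show.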
*/ static const struct divisor_table_entry divisor_table[] = { { 50, 4608}, { 75, 3072}, { 110, 2095}, /* 2094.545455 => 230450 => .0217 % over */ { 134, 1713}, /* 1713.011152 => 230398.5 => .00065% under */ { 150, 1536}, { 300, 768}, { 600, 384}, { 1200, 192}, { 1800, 128}, { 2400, 96}, { 4800, 48}, { 7200, 32}, { 9600, 24}, { 14400, 16}, { 19200, 12}, { 38400, 6}, { 57600, 4}, { 115200, 2}, { 230400, 1}, }; /* Number of outstanding Command Write Urbs */ static atomic_t CmdUrbs = ATOMIC_INIT(0); /* function prototypes */ static void edge_close(struct usb_serial_port *port); static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength); static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3); static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data, int length); static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr); static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData, __u8 lsr, __u8 data); static int send_iosp_ext_cmd(struct edgeport_port *edge_port, __u8 command, __u8 param); static int calc_baud_rate_divisor(struct device *dev, int baud_rate, int *divisor); static void change_port_settings(struct tty_struct *tty, struct edgeport_port *edge_port, const struct ktermios *old_termios); static int send_cmd_write_uart_register(struct edgeport_port *edge_port, __u8 regNum, __u8 regValue); static int write_cmd_usb(struct edgeport_port *edge_port, unsigned char *buffer, int writeLength); static void send_more_port_data(struct edgeport_serial *edge_serial, struct edgeport_port *edge_port); static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, const __u8 *data); /* ************************************************************************ */ /* ************************************************************************ */ /* ************************************************************************ */ /* ************************************************************************ */ /************************************************************************ * * * update_edgeport_E2PROM() Compare current versions of * * Boot ROM and Manufacture * * Descriptors with versions * * embedded in this driver * * * ************************************************************************/ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial) { struct device *dev = &edge_serial->serial->dev->dev; __u32 BootCurVer; __u32 BootNewVer; __u8 BootMajorVersion; __u8 BootMinorVersion; __u16 BootBuildNumber; __u32 Bootaddr; const struct ihex_binrec *rec; const struct firmware *fw; const char *fw_name; int response; switch (edge_serial->product_info.iDownloadFile) { case EDGE_DOWNLOAD_FILE_I930: fw_name = "edgeport/boot.fw"; break; case EDGE_DOWNLOAD_FILE_80251: fw_name = "edgeport/boot2.fw"; break; default: return; } response = request_ihex_firmware(&fw, fw_name, &edge_serial->serial->dev->dev); if (response) { dev_err(dev, "Failed to load image \"%s\" err %d\n", fw_name, response); return; } rec = (const struct ihex_binrec *)fw->data; BootMajorVersion = rec->data[0]; BootMinorVersion = rec->data[1]; BootBuildNumber = (rec->data[2] << 8) | rec->data[3]; /* Check Boot Image Version */ BootCurVer = (edge_serial->boot_descriptor.MajorVersion << 24) + (edge_serial->boot_descriptor.MinorVersion << 16) + le16_to_cpu(edge_serial->boot_descriptor.BuildNumber); BootNewVer = (BootMajorVersion << 24) + (BootMinorVersion << 16) + BootBuildNumber; 
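	/*
	 * Both values pack the version as (major << 24) + (minor << 16) + build,
	 * so the plain 32-bit comparison below orders releases correctly
	 * (e.g. 2.0.5 packs to 0x02000005).
	 */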
dev_dbg(dev, "Current Boot Image version %d.%d.%d\n", edge_serial->boot_descriptor.MajorVersion, edge_serial->boot_descriptor.MinorVersion, le16_to_cpu(edge_serial->boot_descriptor.BuildNumber)); if (BootNewVer > BootCurVer) { dev_dbg(dev, "**Update Boot Image from %d.%d.%d to %d.%d.%d\n", edge_serial->boot_descriptor.MajorVersion, edge_serial->boot_descriptor.MinorVersion, le16_to_cpu(edge_serial->boot_descriptor.BuildNumber), BootMajorVersion, BootMinorVersion, BootBuildNumber); dev_dbg(dev, "Downloading new Boot Image\n"); for (rec = ihex_next_binrec(rec); rec; rec = ihex_next_binrec(rec)) { Bootaddr = be32_to_cpu(rec->addr); response = rom_write(edge_serial->serial, Bootaddr >> 16, Bootaddr & 0xFFFF, be16_to_cpu(rec->len), &rec->data[0]); if (response < 0) { dev_err(&edge_serial->serial->dev->dev, "rom_write failed (%x, %x, %d)\n", Bootaddr >> 16, Bootaddr & 0xFFFF, be16_to_cpu(rec->len)); break; } } } else { dev_dbg(dev, "Boot Image -- already up to date\n"); } release_firmware(fw); } static void dump_product_info(struct edgeport_serial *edge_serial, struct edgeport_product_info *product_info) { struct device *dev = &edge_serial->serial->dev->dev; /* Dump Product Info structure */ dev_dbg(dev, "**Product Information:\n"); dev_dbg(dev, " ProductId %x\n", product_info->ProductId); dev_dbg(dev, " NumPorts %d\n", product_info->NumPorts); dev_dbg(dev, " ProdInfoVer %d\n", product_info->ProdInfoVer); dev_dbg(dev, " IsServer %d\n", product_info->IsServer); dev_dbg(dev, " IsRS232 %d\n", product_info->IsRS232); dev_dbg(dev, " IsRS422 %d\n", product_info->IsRS422); dev_dbg(dev, " IsRS485 %d\n", product_info->IsRS485); dev_dbg(dev, " RomSize %d\n", product_info->RomSize); dev_dbg(dev, " RamSize %d\n", product_info->RamSize); dev_dbg(dev, " CpuRev %x\n", product_info->CpuRev); dev_dbg(dev, " BoardRev %x\n", product_info->BoardRev); dev_dbg(dev, " BootMajorVersion %d.%d.%d\n", product_info->BootMajorVersion, product_info->BootMinorVersion, le16_to_cpu(product_info->BootBuildNumber)); dev_dbg(dev, " FirmwareMajorVersion %d.%d.%d\n", product_info->FirmwareMajorVersion, product_info->FirmwareMinorVersion, le16_to_cpu(product_info->FirmwareBuildNumber)); dev_dbg(dev, " ManufactureDescDate %d/%d/%d\n", product_info->ManufactureDescDate[0], product_info->ManufactureDescDate[1], product_info->ManufactureDescDate[2]+1900); dev_dbg(dev, " iDownloadFile 0x%x\n", product_info->iDownloadFile); dev_dbg(dev, " EpicVer %d\n", product_info->EpicVer); } static void get_product_info(struct edgeport_serial *edge_serial) { struct edgeport_product_info *product_info = &edge_serial->product_info; memset(product_info, 0, sizeof(struct edgeport_product_info)); product_info->ProductId = (__u16)(le16_to_cpu(edge_serial->serial->dev->descriptor.idProduct) & ~ION_DEVICE_ID_80251_NETCHIP); product_info->NumPorts = edge_serial->manuf_descriptor.NumPorts; product_info->ProdInfoVer = 0; product_info->RomSize = edge_serial->manuf_descriptor.RomSize; product_info->RamSize = edge_serial->manuf_descriptor.RamSize; product_info->CpuRev = edge_serial->manuf_descriptor.CpuRev; product_info->BoardRev = edge_serial->manuf_descriptor.BoardRev; product_info->BootMajorVersion = edge_serial->boot_descriptor.MajorVersion; product_info->BootMinorVersion = edge_serial->boot_descriptor.MinorVersion; product_info->BootBuildNumber = edge_serial->boot_descriptor.BuildNumber; memcpy(product_info->ManufactureDescDate, edge_serial->manuf_descriptor.DescDate, sizeof(edge_serial->manuf_descriptor.DescDate)); /* check if this is 2nd generation hardware 
*/ if (le16_to_cpu(edge_serial->serial->dev->descriptor.idProduct) & ION_DEVICE_ID_80251_NETCHIP) product_info->iDownloadFile = EDGE_DOWNLOAD_FILE_80251; else product_info->iDownloadFile = EDGE_DOWNLOAD_FILE_I930; /* Determine Product type and set appropriate flags */ switch (DEVICE_ID_FROM_USB_PRODUCT_ID(product_info->ProductId)) { case ION_DEVICE_ID_EDGEPORT_COMPATIBLE: case ION_DEVICE_ID_EDGEPORT_4T: case ION_DEVICE_ID_EDGEPORT_4: case ION_DEVICE_ID_EDGEPORT_2: case ION_DEVICE_ID_EDGEPORT_8_DUAL_CPU: case ION_DEVICE_ID_EDGEPORT_8: case ION_DEVICE_ID_EDGEPORT_421: case ION_DEVICE_ID_EDGEPORT_21: case ION_DEVICE_ID_EDGEPORT_2_DIN: case ION_DEVICE_ID_EDGEPORT_4_DIN: case ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU: product_info->IsRS232 = 1; break; case ION_DEVICE_ID_EDGEPORT_2I: /* Edgeport/2 RS422/RS485 */ product_info->IsRS422 = 1; product_info->IsRS485 = 1; break; case ION_DEVICE_ID_EDGEPORT_8I: /* Edgeport/4 RS422 */ case ION_DEVICE_ID_EDGEPORT_4I: /* Edgeport/4 RS422 */ product_info->IsRS422 = 1; break; } dump_product_info(edge_serial, product_info); } static int get_epic_descriptor(struct edgeport_serial *ep) { int result; struct usb_serial *serial = ep->serial; struct edgeport_product_info *product_info = &ep->product_info; struct edge_compatibility_descriptor *epic; struct edge_compatibility_bits *bits; struct device *dev = &serial->dev->dev; ep->is_epic = 0; epic = kmalloc(sizeof(*epic), GFP_KERNEL); if (!epic) return -ENOMEM; result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQUEST_ION_GET_EPIC_DESC, 0xC0, 0x00, 0x00, epic, sizeof(*epic), 300); if (result == sizeof(*epic)) { ep->is_epic = 1; memcpy(&ep->epic_descriptor, epic, sizeof(*epic)); memset(product_info, 0, sizeof(struct edgeport_product_info)); product_info->NumPorts = epic->NumPorts; product_info->ProdInfoVer = 0; product_info->FirmwareMajorVersion = epic->MajorVersion; product_info->FirmwareMinorVersion = epic->MinorVersion; product_info->FirmwareBuildNumber = epic->BuildNumber; product_info->iDownloadFile = epic->iDownloadFile; product_info->EpicVer = epic->EpicVer; product_info->Epic = epic->Supports; product_info->ProductId = ION_DEVICE_ID_EDGEPORT_COMPATIBLE; dump_product_info(ep, product_info); bits = &ep->epic_descriptor.Supports; dev_dbg(dev, "**EPIC descriptor:\n"); dev_dbg(dev, " VendEnableSuspend: %s\n", bits->VendEnableSuspend ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPOpen : %s\n", bits->IOSPOpen ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPClose : %s\n", bits->IOSPClose ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPChase : %s\n", bits->IOSPChase ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPSetRxFlow : %s\n", bits->IOSPSetRxFlow ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPSetTxFlow : %s\n", bits->IOSPSetTxFlow ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPSetXChar : %s\n", bits->IOSPSetXChar ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPRxCheck : %s\n", bits->IOSPRxCheck ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPSetClrBreak : %s\n", bits->IOSPSetClrBreak ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPWriteMCR : %s\n", bits->IOSPWriteMCR ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPWriteLCR : %s\n", bits->IOSPWriteLCR ? "TRUE": "FALSE"); dev_dbg(dev, " IOSPSetBaudRate : %s\n", bits->IOSPSetBaudRate ? "TRUE": "FALSE"); dev_dbg(dev, " TrueEdgeport : %s\n", bits->TrueEdgeport ? 
"TRUE": "FALSE"); result = 0; } else if (result >= 0) { dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n", result); result = -EIO; } kfree(epic); return result; } /************************************************************************/ /************************************************************************/ /* U S B C A L L B A C K F U N C T I O N S */ /* U S B C A L L B A C K F U N C T I O N S */ /************************************************************************/ /************************************************************************/ /***************************************************************************** * edge_interrupt_callback * this is the callback function for when we have received data on the * interrupt endpoint. *****************************************************************************/ static void edge_interrupt_callback(struct urb *urb) { struct edgeport_serial *edge_serial = urb->context; struct device *dev; struct edgeport_port *edge_port; struct usb_serial_port *port; unsigned char *data = urb->transfer_buffer; int length = urb->actual_length; unsigned long flags; int bytes_avail; int position; int txCredits; int portNumber; int result; int status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } dev = &edge_serial->serial->dev->dev; /* process this interrupt-read even if there are no ports open */ if (length) { usb_serial_debug_data(dev, __func__, length, data); if (length > 1) { bytes_avail = data[0] | (data[1] << 8); if (bytes_avail) { spin_lock_irqsave(&edge_serial->es_lock, flags); edge_serial->rxBytesAvail += bytes_avail; dev_dbg(dev, "%s - bytes_avail=%d, rxBytesAvail=%d, read_in_progress=%d\n", __func__, bytes_avail, edge_serial->rxBytesAvail, edge_serial->read_in_progress); if (edge_serial->rxBytesAvail > 0 && !edge_serial->read_in_progress) { dev_dbg(dev, "%s - posting a read\n", __func__); edge_serial->read_in_progress = true; /* we have pending bytes on the bulk in pipe, send a request */ result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC); if (result) { dev_err(dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __func__, result); edge_serial->read_in_progress = false; } } spin_unlock_irqrestore(&edge_serial->es_lock, flags); } } /* grab the txcredits for the ports if available */ position = 2; portNumber = 0; while ((position < length - 1) && (portNumber < edge_serial->serial->num_ports)) { txCredits = data[position] | (data[position+1] << 8); if (txCredits) { port = edge_serial->serial->port[portNumber]; edge_port = usb_get_serial_port_data(port); if (edge_port && edge_port->open) { spin_lock_irqsave(&edge_port->ep_lock, flags); edge_port->txCredits += txCredits; spin_unlock_irqrestore(&edge_port->ep_lock, flags); dev_dbg(dev, "%s - txcredits for port%d = %d\n", __func__, portNumber, edge_port->txCredits); /* tell the tty driver that something has changed */ tty_port_tty_wakeup(&edge_port->port->port); /* Since we have more credit, check if more data can be sent */ send_more_port_data(edge_serial, edge_port); } } position += 2; ++portNumber; } } exit: result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - Error %d submitting control urb\n", __func__, result); } 
/***************************************************************************** * edge_bulk_in_callback * this is the callback function for when we have received data on the * bulk in endpoint. *****************************************************************************/ static void edge_bulk_in_callback(struct urb *urb) { struct edgeport_serial *edge_serial = urb->context; struct device *dev; unsigned char *data = urb->transfer_buffer; int retval; __u16 raw_data_length; int status = urb->status; unsigned long flags; if (status) { dev_dbg(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status); edge_serial->read_in_progress = false; return; } if (urb->actual_length == 0) { dev_dbg(&urb->dev->dev, "%s - read bulk callback with no data\n", __func__); edge_serial->read_in_progress = false; return; } dev = &edge_serial->serial->dev->dev; raw_data_length = urb->actual_length; usb_serial_debug_data(dev, __func__, raw_data_length, data); spin_lock_irqsave(&edge_serial->es_lock, flags); /* decrement our rxBytes available by the number that we just got */ edge_serial->rxBytesAvail -= raw_data_length; dev_dbg(dev, "%s - Received = %d, rxBytesAvail %d\n", __func__, raw_data_length, edge_serial->rxBytesAvail); process_rcvd_data(edge_serial, data, urb->actual_length); /* check to see if there's any more data for us to read */ if (edge_serial->rxBytesAvail > 0) { dev_dbg(dev, "%s - posting a read\n", __func__); retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC); if (retval) { dev_err(dev, "%s - usb_submit_urb(read bulk) failed, retval = %d\n", __func__, retval); edge_serial->read_in_progress = false; } } else { edge_serial->read_in_progress = false; } spin_unlock_irqrestore(&edge_serial->es_lock, flags); } /***************************************************************************** * edge_bulk_out_data_callback * this is the callback function for when we have finished sending * serial data on the bulk out endpoint. *****************************************************************************/ static void edge_bulk_out_data_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; int status = urb->status; if (status) { dev_dbg(&urb->dev->dev, "%s - nonzero write bulk status received: %d\n", __func__, status); } if (edge_port->open) tty_port_tty_wakeup(&edge_port->port->port); /* Release the Write URB */ edge_port->write_in_progress = false; /* Check if more data needs to be sent */ send_more_port_data((struct edgeport_serial *) (usb_get_serial_data(edge_port->port->serial)), edge_port); } /***************************************************************************** * BulkOutCmdCallback * this is the callback function for when we have finished sending a * command on the bulk out endpoint. 
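 *	The urb and its transfer buffer were allocated in write_cmd_usb();
 *	both are freed here and the CmdUrbs count of outstanding command
 *	urbs is decremented.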
*****************************************************************************/ static void edge_bulk_out_cmd_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; struct device *dev = &urb->dev->dev; int status = urb->status; atomic_dec(&CmdUrbs); dev_dbg(dev, "%s - FREE URB %p (outstanding %d)\n", __func__, urb, atomic_read(&CmdUrbs)); /* clean up the transfer buffer */ kfree(urb->transfer_buffer); /* Free the command urb */ usb_free_urb(urb); if (status) { dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); return; } /* tell the tty driver that something has changed */ if (edge_port->open) tty_port_tty_wakeup(&edge_port->port->port); /* we have completed the command */ edge_port->commandPending = false; wake_up(&edge_port->wait_command); } /***************************************************************************** * Driver tty interface functions *****************************************************************************/ /***************************************************************************** * SerialOpen * this function is called by the tty driver when a port is opened * If successful, we return 0 * Otherwise we return a negative error number. *****************************************************************************/ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct device *dev = &port->dev; struct usb_serial *serial; struct edgeport_serial *edge_serial; int response; if (edge_port == NULL) return -ENODEV; /* see if we've set up our endpoint info yet (can't set it up in edge_startup as the structures were not set up at that time.) */ serial = port->serial; edge_serial = usb_get_serial_data(serial); if (edge_serial == NULL) return -ENODEV; if (edge_serial->interrupt_in_buffer == NULL) { struct usb_serial_port *port0 = serial->port[0]; /* not set up yet, so do it now */ edge_serial->interrupt_in_buffer = port0->interrupt_in_buffer; edge_serial->interrupt_in_endpoint = port0->interrupt_in_endpointAddress; edge_serial->interrupt_read_urb = port0->interrupt_in_urb; edge_serial->bulk_in_buffer = port0->bulk_in_buffer; edge_serial->bulk_in_endpoint = port0->bulk_in_endpointAddress; edge_serial->read_urb = port0->read_urb; edge_serial->bulk_out_endpoint = port0->bulk_out_endpointAddress; /* set up our interrupt urb */ usb_fill_int_urb(edge_serial->interrupt_read_urb, serial->dev, usb_rcvintpipe(serial->dev, port0->interrupt_in_endpointAddress), port0->interrupt_in_buffer, edge_serial->interrupt_read_urb->transfer_buffer_length, edge_interrupt_callback, edge_serial, edge_serial->interrupt_read_urb->interval); /* set up our bulk in urb */ usb_fill_bulk_urb(edge_serial->read_urb, serial->dev, usb_rcvbulkpipe(serial->dev, port0->bulk_in_endpointAddress), port0->bulk_in_buffer, edge_serial->read_urb->transfer_buffer_length, edge_bulk_in_callback, edge_serial); edge_serial->read_in_progress = false; /* start interrupt read for this edgeport * this interrupt will continue as long * as the edgeport is connected */ response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL); if (response) { dev_err(dev, "%s - Error %d submitting control urb\n", __func__, response); } } /* initialize our wait queues */ init_waitqueue_head(&edge_port->wait_open); init_waitqueue_head(&edge_port->wait_chase); init_waitqueue_head(&edge_port->wait_command); /* initialize our port settings */ edge_port->txCredits = 0; /* Can't send any data yet */ /* 
Must always set this bit to enable ints! */ edge_port->shadowMCR = MCR_MASTER_IE; edge_port->chaseResponsePending = false; /* send a open port command */ edge_port->openPending = true; edge_port->open = false; response = send_iosp_ext_cmd(edge_port, IOSP_CMD_OPEN_PORT, 0); if (response < 0) { dev_err(dev, "%s - error sending open port command\n", __func__); edge_port->openPending = false; return -ENODEV; } /* now wait for the port to be completely opened */ wait_event_timeout(edge_port->wait_open, !edge_port->openPending, OPEN_TIMEOUT); if (!edge_port->open) { /* open timed out */ dev_dbg(dev, "%s - open timeout\n", __func__); edge_port->openPending = false; return -ENODEV; } /* create the txfifo */ edge_port->txfifo.head = 0; edge_port->txfifo.tail = 0; edge_port->txfifo.count = 0; edge_port->txfifo.size = edge_port->maxTxCredits; edge_port->txfifo.fifo = kmalloc(edge_port->maxTxCredits, GFP_KERNEL); if (!edge_port->txfifo.fifo) { edge_close(port); return -ENOMEM; } /* Allocate a URB for the write */ edge_port->write_urb = usb_alloc_urb(0, GFP_KERNEL); edge_port->write_in_progress = false; if (!edge_port->write_urb) { edge_close(port); return -ENOMEM; } dev_dbg(dev, "%s - Initialize TX fifo to %d bytes\n", __func__, edge_port->maxTxCredits); return 0; } /************************************************************************ * * block_until_chase_response * * This function will block the close until one of the following: * 1. Response to our Chase comes from Edgeport * 2. A timeout of 10 seconds without activity has expired * (1K of Edgeport data @ 2400 baud ==> 4 sec to empty) * ************************************************************************/ static void block_until_chase_response(struct edgeport_port *edge_port) { struct device *dev = &edge_port->port->dev; DEFINE_WAIT(wait); __u16 lastCredits; int timeout = 1*HZ; int loop = 10; while (1) { /* Save Last credits */ lastCredits = edge_port->txCredits; /* Did we get our Chase response */ if (!edge_port->chaseResponsePending) { dev_dbg(dev, "%s - Got Chase Response\n", __func__); /* did we get all of our credit back? */ if (edge_port->txCredits == edge_port->maxTxCredits) { dev_dbg(dev, "%s - Got all credits\n", __func__); return; } } /* Block the thread for a while */ prepare_to_wait(&edge_port->wait_chase, &wait, TASK_UNINTERRUPTIBLE); schedule_timeout(timeout); finish_wait(&edge_port->wait_chase, &wait); if (lastCredits == edge_port->txCredits) { /* No activity.. count down. */ loop--; if (loop == 0) { edge_port->chaseResponsePending = false; dev_dbg(dev, "%s - Chase TIMEOUT\n", __func__); return; } } else { /* Reset timeout value back to 10 seconds */ dev_dbg(dev, "%s - Last %d, Current %d\n", __func__, lastCredits, edge_port->txCredits); loop = 10; } } } /************************************************************************ * * block_until_tx_empty * * This function will block the close until one of the following: * 1. TX count are 0 * 2. The edgeport has stopped * 3. A timeout of 3 seconds without activity has expired * ************************************************************************/ static void block_until_tx_empty(struct edgeport_port *edge_port) { struct device *dev = &edge_port->port->dev; DEFINE_WAIT(wait); struct TxFifo *fifo = &edge_port->txfifo; __u32 lastCount; int timeout = HZ/10; int loop = 30; while (1) { /* Save Last count */ lastCount = fifo->count; /* Is the Edgeport Buffer empty? 
*/ if (lastCount == 0) { dev_dbg(dev, "%s - TX Buffer Empty\n", __func__); return; } /* Block the thread for a while */ prepare_to_wait(&edge_port->wait_chase, &wait, TASK_UNINTERRUPTIBLE); schedule_timeout(timeout); finish_wait(&edge_port->wait_chase, &wait); dev_dbg(dev, "%s wait\n", __func__); if (lastCount == fifo->count) { /* No activity.. count down. */ loop--; if (loop == 0) { dev_dbg(dev, "%s - TIMEOUT\n", __func__); return; } } else { /* Reset timeout value back to seconds */ loop = 30; } } } /***************************************************************************** * edge_close * this function is called by the tty driver when a port is closed *****************************************************************************/ static void edge_close(struct usb_serial_port *port) { struct edgeport_serial *edge_serial; struct edgeport_port *edge_port; int status; edge_serial = usb_get_serial_data(port->serial); edge_port = usb_get_serial_port_data(port); if (edge_serial == NULL || edge_port == NULL) return; /* block until tx is empty */ block_until_tx_empty(edge_port); edge_port->closePending = true; if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPChase) { /* flush and chase */ edge_port->chaseResponsePending = true; dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CHASE_PORT\n", __func__); status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CHASE_PORT, 0); if (status == 0) /* block until chase finished */ block_until_chase_response(edge_port); else edge_port->chaseResponsePending = false; } if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPClose) { /* close the port */ dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CLOSE_PORT\n", __func__); send_iosp_ext_cmd(edge_port, IOSP_CMD_CLOSE_PORT, 0); } /* port->close = true; */ edge_port->closePending = false; edge_port->open = false; edge_port->openPending = false; usb_kill_urb(edge_port->write_urb); if (edge_port->write_urb) { /* if this urb had a transfer buffer already (old transfer) free it */ kfree(edge_port->write_urb->transfer_buffer); usb_free_urb(edge_port->write_urb); edge_port->write_urb = NULL; } kfree(edge_port->txfifo.fifo); edge_port->txfifo.fifo = NULL; } /***************************************************************************** * SerialWrite * this function is called by the tty driver when data should be written * to the port. * If successful, we return the number of bytes written, otherwise we * return a negative error number. 
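 *	Data is only staged here: it is copied into the port's TxFifo ring,
 *	bounded by the TxCredits the device has granted, and the actual bulk
 *	transfer is started by send_more_port_data().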
*****************************************************************************/ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct TxFifo *fifo; int copySize; int bytesleft; int firsthalf; int secondhalf; unsigned long flags; if (edge_port == NULL) return -ENODEV; /* get a pointer to the Tx fifo */ fifo = &edge_port->txfifo; spin_lock_irqsave(&edge_port->ep_lock, flags); /* calculate number of bytes to put in fifo */ copySize = min_t(unsigned int, count, (edge_port->txCredits - fifo->count)); dev_dbg(&port->dev, "%s of %d byte(s) Fifo room %d -- will copy %d bytes\n", __func__, count, edge_port->txCredits - fifo->count, copySize); /* catch writes of 0 bytes which the tty driver likes to give us, and when txCredits is empty */ if (copySize == 0) { dev_dbg(&port->dev, "%s - copySize = Zero\n", __func__); goto finish_write; } /* queue the data * since we can never overflow the buffer we do not have to check for a * full condition * * the copy is done is two parts -- first fill to the end of the buffer * then copy the reset from the start of the buffer */ bytesleft = fifo->size - fifo->head; firsthalf = min(bytesleft, copySize); dev_dbg(&port->dev, "%s - copy %d bytes of %d into fifo \n", __func__, firsthalf, bytesleft); /* now copy our data */ memcpy(&fifo->fifo[fifo->head], data, firsthalf); usb_serial_debug_data(&port->dev, __func__, firsthalf, &fifo->fifo[fifo->head]); /* update the index and size */ fifo->head += firsthalf; fifo->count += firsthalf; /* wrap the index */ if (fifo->head == fifo->size) fifo->head = 0; secondhalf = copySize-firsthalf; if (secondhalf) { dev_dbg(&port->dev, "%s - copy rest of data %d\n", __func__, secondhalf); memcpy(&fifo->fifo[fifo->head], &data[firsthalf], secondhalf); usb_serial_debug_data(&port->dev, __func__, secondhalf, &fifo->fifo[fifo->head]); /* update the index and size */ fifo->count += secondhalf; fifo->head += secondhalf; /* No need to check for wrap since we can not get to end of * the fifo in this part */ } finish_write: spin_unlock_irqrestore(&edge_port->ep_lock, flags); send_more_port_data((struct edgeport_serial *) usb_get_serial_data(port->serial), edge_port); dev_dbg(&port->dev, "%s wrote %d byte(s) TxCredits %d, Fifo %d\n", __func__, copySize, edge_port->txCredits, fifo->count); return copySize; } /************************************************************************ * * send_more_port_data() * * This routine attempts to write additional UART transmit data * to a port over the USB bulk pipe. It is called (1) when new * data has been written to a port's TxBuffer from higher layers * (2) when the peripheral sends us additional TxCredits indicating * that it can accept more Tx data for a given port; and (3) when * a bulk write completes successfully and we want to see if we * can transmit more. 
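 *	Each bulk transfer is prefixed with a two-byte IOSP data header built
 *	by IOSP_BUILD_DATA_HDR1/2 from the port number and byte count, and
 *	txCredits is decremented by the number of payload bytes handed to the
 *	write urb.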
* ************************************************************************/ static void send_more_port_data(struct edgeport_serial *edge_serial, struct edgeport_port *edge_port) { struct TxFifo *fifo = &edge_port->txfifo; struct device *dev = &edge_port->port->dev; struct urb *urb; unsigned char *buffer; int status; int count; int bytesleft; int firsthalf; int secondhalf; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->write_in_progress || !edge_port->open || (fifo->count == 0)) { dev_dbg(dev, "%s EXIT - fifo %d, PendingWrite = %d\n", __func__, fifo->count, edge_port->write_in_progress); goto exit_send; } /* since the amount of data in the fifo will always fit into the * edgeport buffer we do not need to check the write length * * Do we have enough credits for this port to make it worthwhile * to bother queueing a write. If it's too small, say a few bytes, * it's better to wait for more credits so we can do a larger write. */ if (edge_port->txCredits < EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(edge_port->maxTxCredits, EDGE_FW_BULK_MAX_PACKET_SIZE)) { dev_dbg(dev, "%s Not enough credit - fifo %d TxCredit %d\n", __func__, fifo->count, edge_port->txCredits); goto exit_send; } /* lock this write */ edge_port->write_in_progress = true; /* get a pointer to the write_urb */ urb = edge_port->write_urb; /* make sure transfer buffer is freed */ kfree(urb->transfer_buffer); urb->transfer_buffer = NULL; /* build the data header for the buffer and port that we are about to send out */ count = fifo->count; buffer = kmalloc(count+2, GFP_ATOMIC); if (!buffer) { edge_port->write_in_progress = false; goto exit_send; } buffer[0] = IOSP_BUILD_DATA_HDR1(edge_port->port->port_number, count); buffer[1] = IOSP_BUILD_DATA_HDR2(edge_port->port->port_number, count); /* now copy our data */ bytesleft = fifo->size - fifo->tail; firsthalf = min(bytesleft, count); memcpy(&buffer[2], &fifo->fifo[fifo->tail], firsthalf); fifo->tail += firsthalf; fifo->count -= firsthalf; if (fifo->tail == fifo->size) fifo->tail = 0; secondhalf = count-firsthalf; if (secondhalf) { memcpy(&buffer[2+firsthalf], &fifo->fifo[fifo->tail], secondhalf); fifo->tail += secondhalf; fifo->count -= secondhalf; } if (count) usb_serial_debug_data(&edge_port->port->dev, __func__, count, &buffer[2]); /* fill up the urb with all of our data and submit it */ usb_fill_bulk_urb(urb, edge_serial->serial->dev, usb_sndbulkpipe(edge_serial->serial->dev, edge_serial->bulk_out_endpoint), buffer, count+2, edge_bulk_out_data_callback, edge_port); /* decrement the number of credits we have by the number we just sent */ edge_port->txCredits -= count; edge_port->port->icount.tx += count; status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { /* something went wrong */ dev_err_console(edge_port->port, "%s - usb_submit_urb(write bulk) failed, status = %d, data lost\n", __func__, status); edge_port->write_in_progress = false; /* revert the credits as something bad happened. */ edge_port->txCredits += count; edge_port->port->icount.tx -= count; } dev_dbg(dev, "%s wrote %d byte(s) TxCredit %d, Fifo %d\n", __func__, count, edge_port->txCredits, fifo->count); exit_send: spin_unlock_irqrestore(&edge_port->ep_lock, flags); } /***************************************************************************** * edge_write_room * this function is called by the tty driver when it wants to know how * many bytes of data we can accept for a specific port. 
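 *	This is simply txCredits minus the bytes already staged in the TxFifo,
 *	since everything accepted here must eventually fit within the credits
 *	the device has granted.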
*****************************************************************************/ static unsigned int edge_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int room; unsigned long flags; /* total of both buffers is still txCredit */ spin_lock_irqsave(&edge_port->ep_lock, flags); room = edge_port->txCredits - edge_port->txfifo.count; spin_unlock_irqrestore(&edge_port->ep_lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, room); return room; } /***************************************************************************** * edge_chars_in_buffer * this function is called by the tty driver when it wants to know how * many bytes of data we currently have outstanding in the port (data that * has been written, but hasn't made it out the port yet) *****************************************************************************/ static unsigned int edge_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int num_chars; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); num_chars = edge_port->maxTxCredits - edge_port->txCredits + edge_port->txfifo.count; spin_unlock_irqrestore(&edge_port->ep_lock, flags); if (num_chars) { dev_dbg(&port->dev, "%s - returns %u\n", __func__, num_chars); } return num_chars; } /***************************************************************************** * SerialThrottle * this function is called by the tty driver when it wants to stop the data * being read from the port. *****************************************************************************/ static void edge_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; if (edge_port == NULL) return; if (!edge_port->open) { dev_dbg(&port->dev, "%s - port not opened\n", __func__); return; } /* if we are implementing XON/XOFF, send the stop character */ if (I_IXOFF(tty)) { unsigned char stop_char = STOP_CHAR(tty); status = edge_write(tty, port, &stop_char, 1); if (status <= 0) return; } /* if we are implementing RTS/CTS, toggle that line */ if (C_CRTSCTS(tty)) { edge_port->shadowMCR &= ~MCR_RTS; status = send_cmd_write_uart_register(edge_port, MCR, edge_port->shadowMCR); if (status != 0) return; } } /***************************************************************************** * edge_unthrottle * this function is called by the tty driver when it wants to resume the * data being read from the port (called after SerialThrottle is called) *****************************************************************************/ static void edge_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; if (edge_port == NULL) return; if (!edge_port->open) { dev_dbg(&port->dev, "%s - port not opened\n", __func__); return; } /* if we are implementing XON/XOFF, send the start character */ if (I_IXOFF(tty)) { unsigned char start_char = START_CHAR(tty); status = edge_write(tty, port, &start_char, 1); if (status <= 0) return; } /* if we are implementing RTS/CTS, toggle that line */ if (C_CRTSCTS(tty)) { edge_port->shadowMCR |= MCR_RTS; send_cmd_write_uart_register(edge_port, MCR, edge_port->shadowMCR); } } /***************************************************************************** * 
SerialSetTermios * this function is called by the tty driver when it wants to change * the termios structure *****************************************************************************/ static void edge_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); if (edge_port == NULL) return; if (!edge_port->open) { dev_dbg(&port->dev, "%s - port not opened\n", __func__); return; } /* change the port settings to the new ones specified */ change_port_settings(tty, edge_port, old_termios); } /***************************************************************************** * get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. *****************************************************************************/ static int get_lsr_info(struct edgeport_port *edge_port, unsigned int __user *value) { unsigned int result = 0; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->maxTxCredits == edge_port->txCredits && edge_port->txfifo.count == 0) { dev_dbg(&edge_port->port->dev, "%s -- Empty\n", __func__); result = TIOCSER_TEMT; } spin_unlock_irqrestore(&edge_port->ep_lock, flags); if (copy_to_user(value, &result, sizeof(int))) return -EFAULT; return 0; } static int edge_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int mcr; mcr = edge_port->shadowMCR; if (set & TIOCM_RTS) mcr |= MCR_RTS; if (set & TIOCM_DTR) mcr |= MCR_DTR; if (set & TIOCM_LOOP) mcr |= MCR_LOOPBACK; if (clear & TIOCM_RTS) mcr &= ~MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~MCR_LOOPBACK; edge_port->shadowMCR = mcr; send_cmd_write_uart_register(edge_port, MCR, edge_port->shadowMCR); return 0; } static int edge_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int result = 0; unsigned int msr; unsigned int mcr; msr = edge_port->shadowMSR; mcr = edge_port->shadowMCR; result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */ | ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */ | ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */ | ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */ | ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */ | ((msr & EDGEPORT_MSR_DSR) ? 
TIOCM_DSR: 0); /* 0x100 */ return result; } /***************************************************************************** * SerialIoctl * this function handles any ioctl calls to the driver *****************************************************************************/ static int edge_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); switch (cmd) { case TIOCSERGETLSR: dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__); return get_lsr_info(edge_port, (unsigned int __user *) arg); } return -ENOIOCTLCMD; } /***************************************************************************** * SerialBreak * this function sends a break to the port *****************************************************************************/ static int edge_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial); int status = 0; if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPChase) { /* flush and chase */ edge_port->chaseResponsePending = true; dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CHASE_PORT\n", __func__); status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CHASE_PORT, 0); if (status == 0) { /* block until chase finished */ block_until_chase_response(edge_port); } else { edge_port->chaseResponsePending = false; } } if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPSetClrBreak) { if (break_state == -1) { dev_dbg(&port->dev, "%s - Sending IOSP_CMD_SET_BREAK\n", __func__); status = send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_BREAK, 0); } else { dev_dbg(&port->dev, "%s - Sending IOSP_CMD_CLEAR_BREAK\n", __func__); status = send_iosp_ext_cmd(edge_port, IOSP_CMD_CLEAR_BREAK, 0); } if (status) dev_dbg(&port->dev, "%s - error sending break set/clear command.\n", __func__); } return status; } /***************************************************************************** * process_rcvd_data * this function handles the data received on the bulk in pipe. *****************************************************************************/ static void process_rcvd_data(struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength) { struct usb_serial *serial = edge_serial->serial; struct device *dev = &serial->dev->dev; struct usb_serial_port *port; struct edgeport_port *edge_port; __u16 lastBufferLength; __u16 rxLen; lastBufferLength = bufferLength + 1; while (bufferLength > 0) { /* failsafe incase we get a message that we don't understand */ if (lastBufferLength == bufferLength) { dev_dbg(dev, "%s - stuck in loop, exiting it.\n", __func__); break; } lastBufferLength = bufferLength; switch (edge_serial->rxState) { case EXPECT_HDR1: edge_serial->rxHeader1 = *buffer; ++buffer; --bufferLength; if (bufferLength == 0) { edge_serial->rxState = EXPECT_HDR2; break; } fallthrough; case EXPECT_HDR2: edge_serial->rxHeader2 = *buffer; ++buffer; --bufferLength; dev_dbg(dev, "%s - Hdr1=%02X Hdr2=%02X\n", __func__, edge_serial->rxHeader1, edge_serial->rxHeader2); /* Process depending on whether this header is * data or status */ if (IS_CMD_STAT_HDR(edge_serial->rxHeader1)) { /* Decode this status header and go to * EXPECT_HDR1 (if we can process the status * with only 2 bytes), or go to EXPECT_HDR3 to * get the third byte. 
*/ edge_serial->rxPort = IOSP_GET_HDR_PORT(edge_serial->rxHeader1); edge_serial->rxStatusCode = IOSP_GET_STATUS_CODE( edge_serial->rxHeader1); if (!IOSP_STATUS_IS_2BYTE( edge_serial->rxStatusCode)) { /* This status needs additional bytes. * Save what we have and then wait for * more data. */ edge_serial->rxStatusParam = edge_serial->rxHeader2; edge_serial->rxState = EXPECT_HDR3; break; } /* We have all the header bytes, process the status now */ process_rcvd_status(edge_serial, edge_serial->rxHeader2, 0); edge_serial->rxState = EXPECT_HDR1; break; } edge_serial->rxPort = IOSP_GET_HDR_PORT(edge_serial->rxHeader1); edge_serial->rxBytesRemaining = IOSP_GET_HDR_DATA_LEN(edge_serial->rxHeader1, edge_serial->rxHeader2); dev_dbg(dev, "%s - Data for Port %u Len %u\n", __func__, edge_serial->rxPort, edge_serial->rxBytesRemaining); if (bufferLength == 0) { edge_serial->rxState = EXPECT_DATA; break; } fallthrough; case EXPECT_DATA: /* Expect data */ if (bufferLength < edge_serial->rxBytesRemaining) { rxLen = bufferLength; /* Expect data to start next buffer */ edge_serial->rxState = EXPECT_DATA; } else { /* BufLen >= RxBytesRemaining */ rxLen = edge_serial->rxBytesRemaining; /* Start another header next time */ edge_serial->rxState = EXPECT_HDR1; } bufferLength -= rxLen; edge_serial->rxBytesRemaining -= rxLen; /* spit this data back into the tty driver if this port is open */ if (rxLen && edge_serial->rxPort < serial->num_ports) { port = serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port && edge_port->open) { dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n", __func__, rxLen, edge_serial->rxPort); edge_tty_recv(edge_port->port, buffer, rxLen); edge_port->port->icount.rx += rxLen; } } buffer += rxLen; break; case EXPECT_HDR3: /* Expect 3rd byte of status header */ edge_serial->rxHeader3 = *buffer; ++buffer; --bufferLength; /* We have all the header bytes, process the status now */ process_rcvd_status(edge_serial, edge_serial->rxStatusParam, edge_serial->rxHeader3); edge_serial->rxState = EXPECT_HDR1; break; } } } /***************************************************************************** * process_rcvd_status * this function handles the any status messages received on the * bulk in pipe. *****************************************************************************/ static void process_rcvd_status(struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3) { struct usb_serial_port *port; struct edgeport_port *edge_port; struct tty_struct *tty; struct device *dev; __u8 code = edge_serial->rxStatusCode; /* switch the port pointer to the one being currently talked about */ if (edge_serial->rxPort >= edge_serial->serial->num_ports) return; port = edge_serial->serial->port[edge_serial->rxPort]; edge_port = usb_get_serial_port_data(port); if (edge_port == NULL) { dev_err(&edge_serial->serial->dev->dev, "%s - edge_port == NULL for port %d\n", __func__, edge_serial->rxPort); return; } dev = &port->dev; if (code == IOSP_EXT_STATUS) { switch (byte2) { case IOSP_EXT_STATUS_CHASE_RSP: /* we want to do EXT status regardless of port * open/closed */ dev_dbg(dev, "%s - Port %u EXT CHASE_RSP Data = %02x\n", __func__, edge_serial->rxPort, byte3); /* Currently, the only EXT_STATUS is Chase, so process * here instead of one more call to one more subroutine * If/when more EXT_STATUS, there'll be more work to do * Also, we currently clear flag and close the port * regardless of content of above's Byte3. 
* We could choose to do something else when Byte3 says * Timeout on Chase from Edgeport, like wait longer in * block_until_chase_response, but for now we don't. */ edge_port->chaseResponsePending = false; wake_up(&edge_port->wait_chase); return; case IOSP_EXT_STATUS_RX_CHECK_RSP: dev_dbg(dev, "%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3); /* Port->RxCheckRsp = true; */ return; } } if (code == IOSP_STATUS_OPEN_RSP) { edge_port->txCredits = GET_TX_BUFFER_SIZE(byte3); edge_port->maxTxCredits = edge_port->txCredits; dev_dbg(dev, "%s - Port %u Open Response Initial MSR = %02x TxBufferSize = %d\n", __func__, edge_serial->rxPort, byte2, edge_port->txCredits); handle_new_msr(edge_port, byte2); /* send the current line settings to the port so we are in sync with any further termios calls */ tty = tty_port_tty_get(&edge_port->port->port); if (tty) { change_port_settings(tty, edge_port, &tty->termios); tty_kref_put(tty); } /* we have completed the open */ edge_port->openPending = false; edge_port->open = true; wake_up(&edge_port->wait_open); return; } /* If port is closed, silently discard all rcvd status. We can * have cases where buffered status is received AFTER the close * port command is sent to the Edgeport. */ if (!edge_port->open || edge_port->closePending) return; switch (code) { /* Not currently sent by Edgeport */ case IOSP_STATUS_LSR: dev_dbg(dev, "%s - Port %u LSR Status = %02x\n", __func__, edge_serial->rxPort, byte2); handle_new_lsr(edge_port, false, byte2, 0); break; case IOSP_STATUS_LSR_DATA: dev_dbg(dev, "%s - Port %u LSR Status = %02x, Data = %02x\n", __func__, edge_serial->rxPort, byte2, byte3); /* byte2 is LSR Register */ /* byte3 is broken data byte */ handle_new_lsr(edge_port, true, byte2, byte3); break; /* * case IOSP_EXT_4_STATUS: * dev_dbg(dev, "%s - Port %u LSR Status = %02x Data = %02x\n", * __func__, edge_serial->rxPort, byte2, byte3); * break; */ case IOSP_STATUS_MSR: dev_dbg(dev, "%s - Port %u MSR Status = %02x\n", __func__, edge_serial->rxPort, byte2); /* * Process this new modem status and generate appropriate * events, etc, based on the new status. This routine * also saves the MSR in Port->ShadowMsr. */ handle_new_msr(edge_port, byte2); break; default: dev_dbg(dev, "%s - Unrecognized IOSP status code %u\n", __func__, code); break; } } /***************************************************************************** * edge_tty_recv * this function passes data on to the tty flip buffer *****************************************************************************/ static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data, int length) { int cnt; cnt = tty_insert_flip_string(&port->port, data, length); if (cnt < length) { dev_err(&port->dev, "%s - dropping data, %d bytes lost\n", __func__, length - cnt); } data += cnt; length -= cnt; tty_flip_buffer_push(&port->port); } /***************************************************************************** * handle_new_msr * this function handles any change to the msr register for a port. 
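 *	Delta bits bump the corresponding input line counters and wake anyone
 *	sleeping on port.delta_msr_wait; the upper nibble of the new MSR is
 *	cached in shadowMSR.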
*****************************************************************************/ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr) { struct async_icount *icount; if (newMsr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR | EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) { icount = &edge_port->port->icount; /* update input line counters */ if (newMsr & EDGEPORT_MSR_DELTA_CTS) icount->cts++; if (newMsr & EDGEPORT_MSR_DELTA_DSR) icount->dsr++; if (newMsr & EDGEPORT_MSR_DELTA_CD) icount->dcd++; if (newMsr & EDGEPORT_MSR_DELTA_RI) icount->rng++; wake_up_interruptible(&edge_port->port->port.delta_msr_wait); } /* Save the new modem status */ edge_port->shadowMSR = newMsr & 0xf0; } /***************************************************************************** * handle_new_lsr * this function handles any change to the lsr register for a port. *****************************************************************************/ static void handle_new_lsr(struct edgeport_port *edge_port, __u8 lsrData, __u8 lsr, __u8 data) { __u8 newLsr = (__u8) (lsr & (__u8) (LSR_OVER_ERR | LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK)); struct async_icount *icount; edge_port->shadowLSR = lsr; if (newLsr & LSR_BREAK) { /* * Parity and Framing errors only count if they * occur exclusive of a break being * received. */ newLsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK); } /* Place LSR data byte into Rx buffer */ if (lsrData) edge_tty_recv(edge_port->port, &data, 1); /* update input line counters */ icount = &edge_port->port->icount; if (newLsr & LSR_BREAK) icount->brk++; if (newLsr & LSR_OVER_ERR) icount->overrun++; if (newLsr & LSR_PAR_ERR) icount->parity++; if (newLsr & LSR_FRM_ERR) icount->frame++; } /**************************************************************************** * sram_write * writes a number of bytes to the Edgeport device's sram starting at the * given address. * If successful returns the number of bytes written, otherwise it returns * a negative error number of the problem. ****************************************************************************/ static int sram_write(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, const __u8 *data) { int result; __u16 current_length; unsigned char *transfer_buffer; dev_dbg(&serial->dev->dev, "%s - %x, %x, %d\n", __func__, extAddr, addr, length); transfer_buffer = kmalloc(64, GFP_KERNEL); if (!transfer_buffer) return -ENOMEM; /* need to split these writes up into 64 byte chunks */ result = 0; while (length > 0) { if (length > 64) current_length = 64; else current_length = length; /* dev_dbg(&serial->dev->dev, "%s - writing %x, %x, %d\n", __func__, extAddr, addr, current_length); */ memcpy(transfer_buffer, data, current_length); result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), USB_REQUEST_ION_WRITE_RAM, 0x40, addr, extAddr, transfer_buffer, current_length, 300); if (result < 0) break; length -= current_length; addr += current_length; data += current_length; } kfree(transfer_buffer); return result; } /**************************************************************************** * rom_write * writes a number of bytes to the Edgeport device's ROM starting at the * given address. * If successful returns the number of bytes written, otherwise it returns * a negative error number of the problem. 
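 *	Like sram_write() above, the data is sent in 64-byte chunks using the
 *	USB_REQUEST_ION_WRITE_ROM vendor control request.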
****************************************************************************/ static int rom_write(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, const __u8 *data) { int result; __u16 current_length; unsigned char *transfer_buffer; transfer_buffer = kmalloc(64, GFP_KERNEL); if (!transfer_buffer) return -ENOMEM; /* need to split these writes up into 64 byte chunks */ result = 0; while (length > 0) { if (length > 64) current_length = 64; else current_length = length; memcpy(transfer_buffer, data, current_length); result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), USB_REQUEST_ION_WRITE_ROM, 0x40, addr, extAddr, transfer_buffer, current_length, 300); if (result < 0) break; length -= current_length; addr += current_length; data += current_length; } kfree(transfer_buffer); return result; } /**************************************************************************** * rom_read * reads a number of bytes from the Edgeport device starting at the given * address. * Returns zero on success or a negative error number. ****************************************************************************/ static int rom_read(struct usb_serial *serial, __u16 extAddr, __u16 addr, __u16 length, __u8 *data) { int result; __u16 current_length; unsigned char *transfer_buffer; transfer_buffer = kmalloc(64, GFP_KERNEL); if (!transfer_buffer) return -ENOMEM; /* need to split these reads up into 64 byte chunks */ result = 0; while (length > 0) { if (length > 64) current_length = 64; else current_length = length; result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQUEST_ION_READ_ROM, 0xC0, addr, extAddr, transfer_buffer, current_length, 300); if (result < current_length) { if (result >= 0) result = -EIO; break; } memcpy(data, transfer_buffer, current_length); length -= current_length; addr += current_length; data += current_length; result = 0; } kfree(transfer_buffer); return result; } /**************************************************************************** * send_iosp_ext_cmd * Is used to send a IOSP message to the Edgeport device ****************************************************************************/ static int send_iosp_ext_cmd(struct edgeport_port *edge_port, __u8 command, __u8 param) { unsigned char *buffer; unsigned char *currentCommand; int length = 0; int status = 0; buffer = kmalloc(10, GFP_ATOMIC); if (!buffer) return -ENOMEM; currentCommand = buffer; MAKE_CMD_EXT_CMD(&currentCommand, &length, edge_port->port->port_number, command, param); status = write_cmd_usb(edge_port, buffer, length); if (status) { /* something bad happened, let's free up the memory */ kfree(buffer); } return status; } /***************************************************************************** * write_cmd_usb * this function writes the given buffer out to the bulk write endpoint.
*****************************************************************************/ static int write_cmd_usb(struct edgeport_port *edge_port, unsigned char *buffer, int length) { struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial); struct device *dev = &edge_port->port->dev; int status = 0; struct urb *urb; usb_serial_debug_data(dev, __func__, length, buffer); /* Allocate our next urb */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; atomic_inc(&CmdUrbs); dev_dbg(dev, "%s - ALLOCATE URB %p (outstanding %d)\n", __func__, urb, atomic_read(&CmdUrbs)); usb_fill_bulk_urb(urb, edge_serial->serial->dev, usb_sndbulkpipe(edge_serial->serial->dev, edge_serial->bulk_out_endpoint), buffer, length, edge_bulk_out_cmd_callback, edge_port); edge_port->commandPending = true; status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { /* something went wrong */ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", __func__, status); usb_free_urb(urb); atomic_dec(&CmdUrbs); return status; } #if 0 wait_event(&edge_port->wait_command, !edge_port->commandPending); if (edge_port->commandPending) { /* command timed out */ dev_dbg(dev, "%s - command timed out\n", __func__); status = -EINVAL; } #endif return status; } /***************************************************************************** * send_cmd_write_baud_rate * this function sends the proper command to change the baud rate of the * specified port. *****************************************************************************/ static int send_cmd_write_baud_rate(struct edgeport_port *edge_port, int baudRate) { struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial); struct device *dev = &edge_port->port->dev; unsigned char *cmdBuffer; unsigned char *currCmd; int cmdLen = 0; int divisor; int status; u32 number = edge_port->port->port_number; if (edge_serial->is_epic && !edge_serial->epic_descriptor.Supports.IOSPSetBaudRate) { dev_dbg(dev, "SendCmdWriteBaudRate - NOT Setting baud rate for port, baud = %d\n", baudRate); return 0; } dev_dbg(dev, "%s - baud = %d\n", __func__, baudRate); status = calc_baud_rate_divisor(dev, baudRate, &divisor); if (status) { dev_err(dev, "%s - bad baud rate\n", __func__); return status; } /* Alloc memory for the string of commands. */ cmdBuffer = kmalloc(0x100, GFP_ATOMIC); if (!cmdBuffer) return -ENOMEM; currCmd = cmdBuffer; /* Enable access to divisor latch */ MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, LCR, LCR_DL_ENABLE); /* Write the divisor itself */ MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, DLL, LOW8(divisor)); MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, DLM, HIGH8(divisor)); /* Restore original value to disable access to divisor latch */ MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, number, LCR, edge_port->shadowLCR); status = write_cmd_usb(edge_port, cmdBuffer, cmdLen); if (status) { /* something bad happened, let's free up the memory */ kfree(cmdBuffer); } return status; } /***************************************************************************** * calc_baud_rate_divisor * this function calculates the proper baud rate divisor for the specified * baud rate. 
*****************************************************************************/ static int calc_baud_rate_divisor(struct device *dev, int baudrate, int *divisor) { int i; __u16 custom; for (i = 0; i < ARRAY_SIZE(divisor_table); i++) { if (divisor_table[i].BaudRate == baudrate) { *divisor = divisor_table[i].Divisor; return 0; } } /* We have tried all of the standard baud rates * lets try to calculate the divisor for this baud rate * Make sure the baud rate is reasonable */ if (baudrate > 50 && baudrate < 230400) { /* get divisor */ custom = (__u16)((230400L + baudrate/2) / baudrate); *divisor = custom; dev_dbg(dev, "%s - Baud %d = %d\n", __func__, baudrate, custom); return 0; } return -1; } /***************************************************************************** * send_cmd_write_uart_register * this function builds up a uart register message and sends to the device. *****************************************************************************/ static int send_cmd_write_uart_register(struct edgeport_port *edge_port, __u8 regNum, __u8 regValue) { struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial); struct device *dev = &edge_port->port->dev; unsigned char *cmdBuffer; unsigned char *currCmd; unsigned long cmdLen = 0; int status; dev_dbg(dev, "%s - write to %s register 0x%02x\n", (regNum == MCR) ? "MCR" : "LCR", __func__, regValue); if (edge_serial->is_epic && !edge_serial->epic_descriptor.Supports.IOSPWriteMCR && regNum == MCR) { dev_dbg(dev, "SendCmdWriteUartReg - Not writing to MCR Register\n"); return 0; } if (edge_serial->is_epic && !edge_serial->epic_descriptor.Supports.IOSPWriteLCR && regNum == LCR) { dev_dbg(dev, "SendCmdWriteUartReg - Not writing to LCR Register\n"); return 0; } /* Alloc memory for the string of commands. */ cmdBuffer = kmalloc(0x10, GFP_ATOMIC); if (cmdBuffer == NULL) return -ENOMEM; currCmd = cmdBuffer; /* Build a cmd in the buffer to write the given register */ MAKE_CMD_WRITE_REG(&currCmd, &cmdLen, edge_port->port->port_number, regNum, regValue); status = write_cmd_usb(edge_port, cmdBuffer, cmdLen); if (status) { /* something bad happened, let's free up the memory */ kfree(cmdBuffer); } return status; } /***************************************************************************** * change_port_settings * This routine is called to set the UART on the device to match the * specified new settings. 
*****************************************************************************/ static void change_port_settings(struct tty_struct *tty, struct edgeport_port *edge_port, const struct ktermios *old_termios) { struct device *dev = &edge_port->port->dev; struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial); int baud; unsigned cflag; __u8 mask = 0xff; __u8 lData; __u8 lParity; __u8 lStop; __u8 rxFlow; __u8 txFlow; int status; if (!edge_port->open && !edge_port->openPending) { dev_dbg(dev, "%s - port not opened\n", __func__); return; } cflag = tty->termios.c_cflag; switch (cflag & CSIZE) { case CS5: lData = LCR_BITS_5; mask = 0x1f; dev_dbg(dev, "%s - data bits = 5\n", __func__); break; case CS6: lData = LCR_BITS_6; mask = 0x3f; dev_dbg(dev, "%s - data bits = 6\n", __func__); break; case CS7: lData = LCR_BITS_7; mask = 0x7f; dev_dbg(dev, "%s - data bits = 7\n", __func__); break; default: case CS8: lData = LCR_BITS_8; dev_dbg(dev, "%s - data bits = 8\n", __func__); break; } lParity = LCR_PAR_NONE; if (cflag & PARENB) { if (cflag & CMSPAR) { if (cflag & PARODD) { lParity = LCR_PAR_MARK; dev_dbg(dev, "%s - parity = mark\n", __func__); } else { lParity = LCR_PAR_SPACE; dev_dbg(dev, "%s - parity = space\n", __func__); } } else if (cflag & PARODD) { lParity = LCR_PAR_ODD; dev_dbg(dev, "%s - parity = odd\n", __func__); } else { lParity = LCR_PAR_EVEN; dev_dbg(dev, "%s - parity = even\n", __func__); } } else { dev_dbg(dev, "%s - parity = none\n", __func__); } if (cflag & CSTOPB) { lStop = LCR_STOP_2; dev_dbg(dev, "%s - stop bits = 2\n", __func__); } else { lStop = LCR_STOP_1; dev_dbg(dev, "%s - stop bits = 1\n", __func__); } /* figure out the flow control settings */ rxFlow = txFlow = 0x00; if (cflag & CRTSCTS) { rxFlow |= IOSP_RX_FLOW_RTS; txFlow |= IOSP_TX_FLOW_CTS; dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__); } else { dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__); } /* if we are implementing XON/XOFF, set the start and stop character in the device */ if (I_IXOFF(tty) || I_IXON(tty)) { unsigned char stop_char = STOP_CHAR(tty); unsigned char start_char = START_CHAR(tty); if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPSetXChar) { send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XON_CHAR, start_char); send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XOFF_CHAR, stop_char); } /* if we are implementing INBOUND XON/XOFF */ if (I_IXOFF(tty)) { rxFlow |= IOSP_RX_FLOW_XON_XOFF; dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n", __func__, start_char, stop_char); } else { dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__); } /* if we are implementing OUTBOUND XON/XOFF */ if (I_IXON(tty)) { txFlow |= IOSP_TX_FLOW_XON_XOFF; dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n", __func__, start_char, stop_char); } else { dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__); } } /* Set flow control to the configured value */ if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPSetRxFlow) send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow); if (!edge_serial->is_epic || edge_serial->epic_descriptor.Supports.IOSPSetTxFlow) send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_TX_FLOW, txFlow); edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); edge_port->shadowLCR |= (lData | lParity | lStop); edge_port->validDataMask = mask; /* Send the updated LCR value to the EdgePort */ status = send_cmd_write_uart_register(edge_port, LCR, edge_port->shadowLCR); if (status != 0) 
return; /* set up the MCR register and send it to the EdgePort */ edge_port->shadowMCR = MCR_MASTER_IE; if (cflag & CBAUD) edge_port->shadowMCR |= (MCR_DTR | MCR_RTS); status = send_cmd_write_uart_register(edge_port, MCR, edge_port->shadowMCR); if (status != 0) return; /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(tty); if (!baud) { /* pick a default, any default... */ baud = 9600; } dev_dbg(dev, "%s - baud rate = %d\n", __func__, baud); status = send_cmd_write_baud_rate(edge_port, baud); if (status == -1) { /* Speed change was not possible - put back the old speed */ baud = tty_termios_baud_rate(old_termios); tty_encode_baud_rate(tty, baud, baud); } } /**************************************************************************** * unicode_to_ascii * Turns a string from Unicode into ASCII. * Doesn't do a good job with any characters that are outside the normal * ASCII range, but it's only for debugging... * NOTE: expects the unicode in LE format ****************************************************************************/ static void unicode_to_ascii(char *string, int buflen, __le16 *unicode, int unicode_size) { int i; if (buflen <= 0) /* never happens, but... */ return; --buflen; /* space for nul */ for (i = 0; i < unicode_size; i++) { if (i >= buflen) break; string[i] = (char)(le16_to_cpu(unicode[i])); } string[i] = 0x00; } /**************************************************************************** * get_manufacturing_desc * reads in the manufacturing descriptor and stores it into the serial * structure. ****************************************************************************/ static void get_manufacturing_desc(struct edgeport_serial *edge_serial) { struct device *dev = &edge_serial->serial->dev->dev; int response; dev_dbg(dev, "getting manufacturer descriptor\n"); response = rom_read(edge_serial->serial, (EDGE_MANUF_DESC_ADDR & 0xffff0000) >> 16, (__u16)(EDGE_MANUF_DESC_ADDR & 0x0000ffff), EDGE_MANUF_DESC_LEN, (__u8 *)(&edge_serial->manuf_descriptor)); if (response < 0) { dev_err(dev, "error in getting manufacturer descriptor: %d\n", response); } else { char string[30]; dev_dbg(dev, "**Manufacturer Descriptor\n"); dev_dbg(dev, " RomSize: %dK\n", edge_serial->manuf_descriptor.RomSize); dev_dbg(dev, " RamSize: %dK\n", edge_serial->manuf_descriptor.RamSize); dev_dbg(dev, " CpuRev: %d\n", edge_serial->manuf_descriptor.CpuRev); dev_dbg(dev, " BoardRev: %d\n", edge_serial->manuf_descriptor.BoardRev); dev_dbg(dev, " NumPorts: %d\n", edge_serial->manuf_descriptor.NumPorts); dev_dbg(dev, " DescDate: %d/%d/%d\n", edge_serial->manuf_descriptor.DescDate[0], edge_serial->manuf_descriptor.DescDate[1], edge_serial->manuf_descriptor.DescDate[2]+1900); unicode_to_ascii(string, sizeof(string), edge_serial->manuf_descriptor.SerialNumber, edge_serial->manuf_descriptor.SerNumLength/2); dev_dbg(dev, " SerialNumber: %s\n", string); unicode_to_ascii(string, sizeof(string), edge_serial->manuf_descriptor.AssemblyNumber, edge_serial->manuf_descriptor.AssemblyNumLength/2); dev_dbg(dev, " AssemblyNumber: %s\n", string); unicode_to_ascii(string, sizeof(string), edge_serial->manuf_descriptor.OemAssyNumber, edge_serial->manuf_descriptor.OemAssyNumLength/2); dev_dbg(dev, " OemAssyNumber: %s\n", string); dev_dbg(dev, " UartType: %d\n", edge_serial->manuf_descriptor.UartType); dev_dbg(dev, " IonPid: %d\n", edge_serial->manuf_descriptor.IonPid); dev_dbg(dev, " IonConfig: %d\n", edge_serial->manuf_descriptor.IonConfig); } } 
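The sram_write/rom_write/rom_read helpers above all walk the target address space in 64-byte chunks, and get_manufacturing_desc splits the 32-bit EDGE_MANUF_DESC_ADDR into a 16-bit extended address and a 16-bit offset before calling rom_read. A minimal host-side sketch of that same address split and chunking arithmetic (plain C, not driver code; the address and length values below are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: mimics the 64-byte chunking used by rom_read()/sram_write().
 * EXAMPLE_ADDR and EXAMPLE_LEN are arbitrary values, not a real descriptor layout. */
#define EXAMPLE_ADDR 0x00080020u
#define EXAMPLE_LEN  150u

int main(void)
{
	uint32_t full_addr = EXAMPLE_ADDR;
	uint16_t ext_addr = (full_addr & 0xffff0000u) >> 16; /* high 16 bits */
	uint16_t addr     = full_addr & 0x0000ffffu;         /* low 16 bits */
	uint16_t length   = EXAMPLE_LEN;

	while (length > 0) {
		uint16_t chunk = length > 64 ? 64 : length;

		/* each iteration corresponds to one usb_control_msg() transfer */
		printf("xfer: ext=0x%04x addr=0x%04x len=%u\n", ext_addr, addr, chunk);

		length -= chunk;
		addr   += chunk;
	}
	return 0;
}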
/**************************************************************************** * get_boot_desc * reads in the bootloader descriptor and stores it into the serial * structure. ****************************************************************************/ static void get_boot_desc(struct edgeport_serial *edge_serial) { struct device *dev = &edge_serial->serial->dev->dev; int response; dev_dbg(dev, "getting boot descriptor\n"); response = rom_read(edge_serial->serial, (EDGE_BOOT_DESC_ADDR & 0xffff0000) >> 16, (__u16)(EDGE_BOOT_DESC_ADDR & 0x0000ffff), EDGE_BOOT_DESC_LEN, (__u8 *)(&edge_serial->boot_descriptor)); if (response < 0) { dev_err(dev, "error in getting boot descriptor: %d\n", response); } else { dev_dbg(dev, "**Boot Descriptor:\n"); dev_dbg(dev, " BootCodeLength: %d\n", le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength)); dev_dbg(dev, " MajorVersion: %d\n", edge_serial->boot_descriptor.MajorVersion); dev_dbg(dev, " MinorVersion: %d\n", edge_serial->boot_descriptor.MinorVersion); dev_dbg(dev, " BuildNumber: %d\n", le16_to_cpu(edge_serial->boot_descriptor.BuildNumber)); dev_dbg(dev, " Capabilities: 0x%x\n", le16_to_cpu(edge_serial->boot_descriptor.Capabilities)); dev_dbg(dev, " UConfig0: %d\n", edge_serial->boot_descriptor.UConfig0); dev_dbg(dev, " UConfig1: %d\n", edge_serial->boot_descriptor.UConfig1); } } /**************************************************************************** * load_application_firmware * This is called to load the application firmware to the device ****************************************************************************/ static void load_application_firmware(struct edgeport_serial *edge_serial) { struct device *dev = &edge_serial->serial->dev->dev; const struct ihex_binrec *rec; const struct firmware *fw; const char *fw_name; const char *fw_info; int response; __u32 Operaddr; __u16 build; switch (edge_serial->product_info.iDownloadFile) { case EDGE_DOWNLOAD_FILE_I930: fw_info = "downloading firmware version (930)"; fw_name = "edgeport/down.fw"; break; case EDGE_DOWNLOAD_FILE_80251: fw_info = "downloading firmware version (80251)"; fw_name = "edgeport/down2.fw"; break; case EDGE_DOWNLOAD_FILE_NONE: dev_dbg(dev, "No download file specified, skipping download\n"); return; default: return; } response = request_ihex_firmware(&fw, fw_name, &edge_serial->serial->dev->dev); if (response) { dev_err(dev, "Failed to load image \"%s\" err %d\n", fw_name, response); return; } rec = (const struct ihex_binrec *)fw->data; build = (rec->data[2] << 8) | rec->data[3]; dev_dbg(dev, "%s %d.%d.%d\n", fw_info, rec->data[0], rec->data[1], build); edge_serial->product_info.FirmwareMajorVersion = rec->data[0]; edge_serial->product_info.FirmwareMinorVersion = rec->data[1]; edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); for (rec = ihex_next_binrec(rec); rec; rec = ihex_next_binrec(rec)) { Operaddr = be32_to_cpu(rec->addr); response = sram_write(edge_serial->serial, Operaddr >> 16, Operaddr & 0xFFFF, be16_to_cpu(rec->len), &rec->data[0]); if (response < 0) { dev_err(&edge_serial->serial->dev->dev, "sram_write failed (%x, %x, %d)\n", Operaddr >> 16, Operaddr & 0xFFFF, be16_to_cpu(rec->len)); break; } } dev_dbg(dev, "sending exec_dl_code\n"); response = usb_control_msg (edge_serial->serial->dev, usb_sndctrlpipe(edge_serial->serial->dev, 0), USB_REQUEST_ION_EXEC_DL_CODE, 0x40, 0x4000, 0x0001, NULL, 0, 3000); release_firmware(fw); } /**************************************************************************** * edge_startup 
****************************************************************************/ static int edge_startup(struct usb_serial *serial) { struct edgeport_serial *edge_serial; struct usb_device *dev; struct device *ddev = &serial->dev->dev; int i; int response; bool interrupt_in_found; bool bulk_in_found; bool bulk_out_found; static const __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0, EDGE_COMPATIBILITY_MASK1, EDGE_COMPATIBILITY_MASK2 }; dev = serial->dev; /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (!edge_serial) return -ENOMEM; spin_lock_init(&edge_serial->es_lock); edge_serial->serial = serial; usb_set_serial_data(serial, edge_serial); /* get the name for the device from the device */ i = usb_string(dev, dev->descriptor.iManufacturer, &edge_serial->name[0], MAX_NAME_LEN+1); if (i < 0) i = 0; edge_serial->name[i++] = ' '; usb_string(dev, dev->descriptor.iProduct, &edge_serial->name[i], MAX_NAME_LEN+2 - i); dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); /* Read the epic descriptor */ if (get_epic_descriptor(edge_serial) < 0) { /* memcpy descriptor to Supports structures */ memcpy(&edge_serial->epic_descriptor.Supports, descriptor, sizeof(struct edge_compatibility_bits)); /* get the manufacturing descriptor for this device */ get_manufacturing_desc(edge_serial); /* get the boot descriptor */ get_boot_desc(edge_serial); get_product_info(edge_serial); } /* set the number of ports from the manufacturing description */ /* serial->num_ports = serial->product_info.NumPorts; */ if ((!edge_serial->is_epic) && (edge_serial->product_info.NumPorts != serial->num_ports)) { dev_warn(ddev, "Device Reported %d serial ports vs. core thinking we have %d ports, email greg@kroah.com this information.\n", edge_serial->product_info.NumPorts, serial->num_ports); } dev_dbg(ddev, "%s - time 1 %ld\n", __func__, jiffies); /* If not an EPiC device */ if (!edge_serial->is_epic) { /* now load the application firmware into this device */ load_application_firmware(edge_serial); dev_dbg(ddev, "%s - time 2 %ld\n", __func__, jiffies); /* Check current Edgeport EEPROM and update if necessary */ update_edgeport_E2PROM(edge_serial); dev_dbg(ddev, "%s - time 3 %ld\n", __func__, jiffies); /* set the configuration to use #1 */ /* dev_dbg(ddev, "set_configuration 1\n"); */ /* usb_set_configuration (dev, 1); */ } dev_dbg(ddev, " FirmwareMajorVersion %d.%d.%d\n", edge_serial->product_info.FirmwareMajorVersion, edge_serial->product_info.FirmwareMinorVersion, le16_to_cpu(edge_serial->product_info.FirmwareBuildNumber)); /* we set up the pointers to the endpoints in the edge_open function, * as the structures aren't created yet. */ response = 0; if (edge_serial->is_epic) { struct usb_host_interface *alt; alt = serial->interface->cur_altsetting; /* EPIC thing, set up our interrupt polling now and our read * urb, so that the device knows it really is connected. 
*/ interrupt_in_found = bulk_in_found = bulk_out_found = false; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { struct usb_endpoint_descriptor *endpoint; int buffer_size; endpoint = &alt->endpoint[i].desc; buffer_size = usb_endpoint_maxp(endpoint); if (!interrupt_in_found && (usb_endpoint_is_int_in(endpoint))) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in\n"); /* not set up yet, so do it now */ edge_serial->interrupt_read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!edge_serial->interrupt_read_urb) { response = -ENOMEM; break; } edge_serial->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!edge_serial->interrupt_in_buffer) { response = -ENOMEM; break; } edge_serial->interrupt_in_endpoint = endpoint->bEndpointAddress; /* set up our interrupt urb */ usb_fill_int_urb( edge_serial->interrupt_read_urb, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), edge_serial->interrupt_in_buffer, buffer_size, edge_interrupt_callback, edge_serial, endpoint->bInterval); interrupt_in_found = true; } if (!bulk_in_found && (usb_endpoint_is_bulk_in(endpoint))) { /* we found a bulk in endpoint */ dev_dbg(ddev, "found bulk in\n"); /* not set up yet, so do it now */ edge_serial->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!edge_serial->read_urb) { response = -ENOMEM; break; } edge_serial->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!edge_serial->bulk_in_buffer) { response = -ENOMEM; break; } edge_serial->bulk_in_endpoint = endpoint->bEndpointAddress; /* set up our bulk in urb */ usb_fill_bulk_urb(edge_serial->read_urb, dev, usb_rcvbulkpipe(dev, endpoint->bEndpointAddress), edge_serial->bulk_in_buffer, usb_endpoint_maxp(endpoint), edge_bulk_in_callback, edge_serial); bulk_in_found = true; } if (!bulk_out_found && (usb_endpoint_is_bulk_out(endpoint))) { /* we found a bulk out endpoint */ dev_dbg(ddev, "found bulk out\n"); edge_serial->bulk_out_endpoint = endpoint->bEndpointAddress; bulk_out_found = true; } } if (response || !interrupt_in_found || !bulk_in_found || !bulk_out_found) { if (!response) { dev_err(ddev, "expected endpoints not found\n"); response = -ENODEV; } goto error; } /* start interrupt read for this edgeport this interrupt will * continue as long as the edgeport is connected */ response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL); if (response) { dev_err(ddev, "%s - Error %d submitting control urb\n", __func__, response); goto error; } } return response; error: usb_free_urb(edge_serial->interrupt_read_urb); kfree(edge_serial->interrupt_in_buffer); usb_free_urb(edge_serial->read_urb); kfree(edge_serial->bulk_in_buffer); kfree(edge_serial); return response; } /**************************************************************************** * edge_disconnect * This function is called whenever the device is removed from the usb bus. ****************************************************************************/ static void edge_disconnect(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); if (edge_serial->is_epic) { usb_kill_urb(edge_serial->interrupt_read_urb); usb_kill_urb(edge_serial->read_urb); } } /**************************************************************************** * edge_release * This function is called when the device structure is deallocated. 
****************************************************************************/ static void edge_release(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); if (edge_serial->is_epic) { usb_kill_urb(edge_serial->interrupt_read_urb); usb_free_urb(edge_serial->interrupt_read_urb); kfree(edge_serial->interrupt_in_buffer); usb_kill_urb(edge_serial->read_urb); usb_free_urb(edge_serial->read_urb); kfree(edge_serial->bulk_in_buffer); } kfree(edge_serial); } static int edge_port_probe(struct usb_serial_port *port) { struct edgeport_port *edge_port; edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL); if (!edge_port) return -ENOMEM; spin_lock_init(&edge_port->ep_lock); edge_port->port = port; usb_set_serial_port_data(port, edge_port); return 0; } static void edge_port_remove(struct usb_serial_port *port) { struct edgeport_port *edge_port; edge_port = usb_get_serial_port_data(port); kfree(edge_port); } static struct usb_serial_driver edgeport_2port_device = { .driver = { .name = "edgeport_2", }, .description = "Edgeport 2 port adapter", .id_table = edgeport_2port_id_table, .num_ports = 2, .num_bulk_in = 1, .num_bulk_out = 1, .num_interrupt_in = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_data_callback, }; static struct usb_serial_driver edgeport_4port_device = { .driver = { .name = "edgeport_4", }, .description = "Edgeport 4 port adapter", .id_table = edgeport_4port_id_table, .num_ports = 4, .num_bulk_in = 1, .num_bulk_out = 1, .num_interrupt_in = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_data_callback, }; static struct usb_serial_driver edgeport_8port_device = { .driver = { .name = "edgeport_8", }, .description = "Edgeport 8 port adapter", .id_table = edgeport_8port_id_table, .num_ports = 8, .num_bulk_in = 1, .num_bulk_out = 1, .num_interrupt_in = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .write = 
edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_data_callback, }; static struct usb_serial_driver epic_device = { .driver = { .name = "epic", }, .description = "EPiC device", .id_table = Epic_port_id_table, .num_ports = 1, .num_bulk_in = 1, .num_bulk_out = 1, .num_interrupt_in = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_data_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &edgeport_2port_device, &edgeport_4port_device, &edgeport_8port_device, &epic_device, NULL }; module_usb_serial_driver(serial_drivers, id_table_combined); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("edgeport/boot.fw"); MODULE_FIRMWARE("edgeport/boot2.fw"); MODULE_FIRMWARE("edgeport/down.fw"); MODULE_FIRMWARE("edgeport/down2.fw"); |
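For baud rates that are not listed in divisor_table, calc_baud_rate_divisor above falls back to a rounded integer divisor of a 230400 base clock. A small host-side sketch of that fallback arithmetic only (divisor_table itself is not reproduced, and the sample rates below are arbitrary):

#include <stdio.h>

/* Illustrative only: the custom-divisor fallback path of calc_baud_rate_divisor().
 * The 230400 base clock and the rounding match the driver's formula. */
static int custom_divisor(int baudrate)
{
	if (baudrate > 50 && baudrate < 230400)
		return (int)((230400L + baudrate / 2) / baudrate); /* rounded divide */
	return -1; /* out of range, as in the driver */
}

int main(void)
{
	int rates[] = { 9600, 57600, 115200, 128000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("baud %6d -> divisor %d\n", rates[i], custom_divisor(rates[i]));
	return 0;
}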
// SPDX-License-Identifier: GPL-2.0-only /* Common methods for dibusb-based-receivers. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS); MODULE_DESCRIPTION("Common methods for dibusb-based receivers"); MODULE_LICENSE("GPL"); #define deb_info(args...) dprintk(debug,0x01,args) /* common stuff used by the different dibusb modules */ int dibusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.fifo_ctrl != NULL) if (st->ops.fifo_ctrl(adap->fe_adap[0].fe, onoff)) { err("error while controlling the fifo of the demod."); return -ENODEV; } } return 0; } EXPORT_SYMBOL(dibusb_streaming_ctrl); int dibusb_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.pid_ctrl != NULL) st->ops.pid_ctrl(adap->fe_adap[0].fe, index, pid, onoff); } return 0; } EXPORT_SYMBOL(dibusb_pid_filter); int dibusb_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.pid_parse != NULL) if (st->ops.pid_parse(adap->fe_adap[0].fe, onoff) < 0) err("could not handle pid_parser"); } return 0; } EXPORT_SYMBOL(dibusb_pid_filter_ctrl); int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 *b; int ret; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; b[2] = onoff ?
DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; ret = dvb_usb_generic_write(d, b, 3); kfree(b); msleep(10); return ret; } EXPORT_SYMBOL(dibusb_power_ctrl); int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 *b; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) goto ret; if (onoff) { b[0] = DIBUSB_REQ_SET_STREAMING_MODE; b[1] = 0x00; ret = dvb_usb_generic_write(adap->dev, b, 2); if (ret < 0) goto ret; } b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; ret = dvb_usb_generic_write(adap->dev, b, 3); ret: kfree(b); return ret; } EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 *b; int ret; if (!onoff) return 0; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; b[2] = DIBUSB_IOCTL_POWER_WAKEUP; ret = dvb_usb_generic_write(d, b, 3); kfree(b); return ret; } EXPORT_SYMBOL(dibusb2_0_power_ctrl); static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 *sndbuf; int ret, wo, len; /* write only ? */ wo = (rbuf == NULL || rlen == 0); len = 2 + wlen + (wo ? 0 : 2); sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL); if (!sndbuf) return -ENOMEM; if (4 + wlen > MAX_XFER_SIZE) { warn("i2c wr: len=%d is too big!\n", wlen); ret = -EOPNOTSUPP; goto ret; } sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; sndbuf[1] = (addr << 1) | (wo ? 0 : 1); memcpy(&sndbuf[2], wbuf, wlen); if (!wo) { sndbuf[wlen + 2] = (rlen >> 8) & 0xff; sndbuf[wlen + 3] = rlen & 0xff; } ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0); ret: kfree(sndbuf); return ret; } /* * I2C master xfer function */ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0 && (msg[i+1].flags & I2C_M_RD)) { if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len, msg[i+1].buf,msg[i+1].len) < 0) break; i++; } else if ((msg[i].flags & I2C_M_RD) == 0) { if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0) break; } else if (msg[i].addr != 0x50) { /* 0x50 is the address of the eeprom - we need to protect it * from dibusb's bad i2c implementation: reads without * writing the offset before are forbidden */ if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0) break; } } mutex_unlock(&d->i2c_mutex); return i; } static u32 dibusb_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } struct i2c_algorithm dibusb_i2c_algo = { .master_xfer = dibusb_i2c_xfer, .functionality = dibusb_i2c_func, }; EXPORT_SYMBOL(dibusb_i2c_algo); int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) { u8 *buf; int rc; buf = kzalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = offs; rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1); *val = buf[1]; kfree(buf); return rc; } EXPORT_SYMBOL(dibusb_read_eeprom_byte); /* * common remote control stuff */ struct rc_map_table rc_map_dibusb_table[] = { /* Key codes for the little Artec T1/Twinhan/HAMA/ remote. 
*/ { 0x0016, KEY_POWER }, { 0x0010, KEY_MUTE }, { 0x0003, KEY_1 }, { 0x0001, KEY_2 }, { 0x0006, KEY_3 }, { 0x0009, KEY_4 }, { 0x001d, KEY_5 }, { 0x001f, KEY_6 }, { 0x000d, KEY_7 }, { 0x0019, KEY_8 }, { 0x001b, KEY_9 }, { 0x0015, KEY_0 }, { 0x0005, KEY_CHANNELUP }, { 0x0002, KEY_CHANNELDOWN }, { 0x001e, KEY_VOLUMEUP }, { 0x000a, KEY_VOLUMEDOWN }, { 0x0011, KEY_RECORD }, { 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */ { 0x0014, KEY_PLAY }, { 0x001a, KEY_STOP }, { 0x0040, KEY_REWIND }, { 0x0012, KEY_FASTFORWARD }, { 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */ { 0x004c, KEY_PAUSE }, { 0x004d, KEY_SCREEN }, /* Full screen mode. */ { 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */ /* additional keys TwinHan VisionPlus, the Artec seemingly not have */ { 0x000c, KEY_CANCEL }, /* Cancel */ { 0x001c, KEY_EPG }, /* EPG */ { 0x0000, KEY_TAB }, /* Tab */ { 0x0048, KEY_INFO }, /* Preview */ { 0x0004, KEY_LIST }, /* RecordList */ { 0x000f, KEY_TEXT }, /* Teletext */ /* Key codes for the KWorld/ADSTech/JetWay remote. */ { 0x8612, KEY_POWER }, { 0x860f, KEY_SELECT }, /* source */ { 0x860c, KEY_UNKNOWN }, /* scan */ { 0x860b, KEY_EPG }, { 0x8610, KEY_MUTE }, { 0x8601, KEY_1 }, { 0x8602, KEY_2 }, { 0x8603, KEY_3 }, { 0x8604, KEY_4 }, { 0x8605, KEY_5 }, { 0x8606, KEY_6 }, { 0x8607, KEY_7 }, { 0x8608, KEY_8 }, { 0x8609, KEY_9 }, { 0x860a, KEY_0 }, { 0x8618, KEY_ZOOM }, { 0x861c, KEY_UNKNOWN }, /* preview */ { 0x8613, KEY_UNKNOWN }, /* snap */ { 0x8600, KEY_UNDO }, { 0x861d, KEY_RECORD }, { 0x860d, KEY_STOP }, { 0x860e, KEY_PAUSE }, { 0x8616, KEY_PLAY }, { 0x8611, KEY_BACK }, { 0x8619, KEY_FORWARD }, { 0x8614, KEY_UNKNOWN }, /* pip */ { 0x8615, KEY_ESC }, { 0x861a, KEY_UP }, { 0x861e, KEY_DOWN }, { 0x861f, KEY_LEFT }, { 0x861b, KEY_RIGHT }, /* Key codes for the DiBcom MOD3000 remote. */ { 0x8000, KEY_MUTE }, { 0x8001, KEY_TEXT }, { 0x8002, KEY_HOME }, { 0x8003, KEY_POWER }, { 0x8004, KEY_RED }, { 0x8005, KEY_GREEN }, { 0x8006, KEY_YELLOW }, { 0x8007, KEY_BLUE }, { 0x8008, KEY_DVD }, { 0x8009, KEY_AUDIO }, { 0x800a, KEY_IMAGES }, /* Pictures */ { 0x800b, KEY_VIDEO }, { 0x800c, KEY_BACK }, { 0x800d, KEY_UP }, { 0x800e, KEY_RADIO }, { 0x800f, KEY_EPG }, { 0x8010, KEY_LEFT }, { 0x8011, KEY_OK }, { 0x8012, KEY_RIGHT }, { 0x8013, KEY_UNKNOWN }, /* SAP */ { 0x8014, KEY_TV }, { 0x8015, KEY_DOWN }, { 0x8016, KEY_MENU }, /* DVD Menu */ { 0x8017, KEY_LAST }, { 0x8018, KEY_RECORD }, { 0x8019, KEY_STOP }, { 0x801a, KEY_PAUSE }, { 0x801b, KEY_PLAY }, { 0x801c, KEY_PREVIOUS }, { 0x801d, KEY_REWIND }, { 0x801e, KEY_FASTFORWARD }, { 0x801f, KEY_NEXT}, { 0x8040, KEY_1 }, { 0x8041, KEY_2 }, { 0x8042, KEY_3 }, { 0x8043, KEY_CHANNELUP }, { 0x8044, KEY_4 }, { 0x8045, KEY_5 }, { 0x8046, KEY_6 }, { 0x8047, KEY_CHANNELDOWN }, { 0x8048, KEY_7 }, { 0x8049, KEY_8 }, { 0x804a, KEY_9 }, { 0x804b, KEY_VOLUMEUP }, { 0x804c, KEY_CLEAR }, { 0x804d, KEY_0 }, { 0x804e, KEY_ENTER }, { 0x804f, KEY_VOLUMEDOWN }, }; EXPORT_SYMBOL(rc_map_dibusb_table); int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { u8 *buf; int ret; buf = kmalloc(5, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = DIBUSB_REQ_POLL_REMOTE; ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0); if (ret < 0) goto ret; dvb_usb_nec_rc_key_to_event(d, buf, event, state); if (buf[0] != 0) deb_info("key: %*ph\n", 5, buf); ret: kfree(buf); return ret; } EXPORT_SYMBOL(dibusb_rc_query); |
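dibusb_i2c_msg() above packs each I2C transaction into a single vendor-request buffer: a request byte, the 7-bit address shifted left with the read flag in bit 0, the write payload, and, for reads, the expected read length as two big-endian bytes. A minimal host-side sketch of that packing (the request codes here are placeholders, not the real DIBUSB_REQ_* values, and the address/payload are made-up):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define REQ_I2C_WRITE 0x01 /* placeholder request code */
#define REQ_I2C_READ  0x02 /* placeholder request code */

/* Illustrative only: mirrors the sndbuf layout built by dibusb_i2c_msg(). */
static int pack_i2c(uint8_t *buf, uint8_t addr,
		    const uint8_t *wbuf, uint16_t wlen, uint16_t rlen)
{
	int wo = (rlen == 0);              /* write-only transaction? */
	int len = 2 + wlen + (wo ? 0 : 2); /* same length formula as the driver */

	buf[0] = wo ? REQ_I2C_WRITE : REQ_I2C_READ;
	buf[1] = (uint8_t)((addr << 1) | (wo ? 0 : 1));
	memcpy(&buf[2], wbuf, wlen);
	if (!wo) {
		buf[wlen + 2] = (rlen >> 8) & 0xff; /* expected read length, MSB first */
		buf[wlen + 3] = rlen & 0xff;
	}
	return len;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t reg = 0x00; /* made-up register offset */
	int len = pack_i2c(buf, 0x50, &reg, 1, 1); /* 1-byte read, as in dibusb_read_eeprom_byte() */
	int i;

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}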
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2002 Petko Manolov (petkan@users.sourceforge.net) */ #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/uaccess.h> /* Version Information */ #define DRIVER_VERSION "v0.6.2 (2004/08/27)" #define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>" #define DRIVER_DESC "rtl8150 based usb-ethernet driver" #define IDR 0x0120 #define MAR 0x0126 #define CR 0x012e #define TCR 0x012f #define RCR 0x0130 #define TSR 0x0132 #define RSR 0x0133 #define CON0 0x0135 #define CON1 0x0136 #define MSR 0x0137 #define PHYADD 0x0138 #define PHYDAT 0x0139 #define PHYCNT 0x013b #define GPPC 0x013d #define BMCR 0x0140 #define BMSR 0x0142 #define ANAR 0x0144 #define ANLP 0x0146 #define AER 0x0148 #define CSCR 0x014C /* This one has the link status */ #define CSCR_LINK_STATUS (1 << 3) #define IDR_EEPROM 0x1202 #define PHY_READ 0 #define PHY_WRITE 0x20 #define PHY_GO 0x40 #define MII_TIMEOUT 10 #define INTBUFSIZE 8 #define RTL8150_REQT_READ 0xc0 #define RTL8150_REQT_WRITE 0x40 #define RTL8150_REQ_GET_REGS 0x05 #define RTL8150_REQ_SET_REGS 0x05 /* Transmit status register errors */ #define TSR_ECOL (1<<5) #define TSR_LCOL (1<<4) #define TSR_LOSS_CRS (1<<3) #define TSR_JBR (1<<2) #define TSR_ERRORS (TSR_ECOL | TSR_LCOL | TSR_LOSS_CRS | TSR_JBR) /* Receive status register errors */ #define RSR_CRC (1<<2) #define RSR_FAE (1<<1) #define RSR_ERRORS (RSR_CRC | RSR_FAE) /* Media status register definitions */ #define MSR_DUPLEX (1<<4) #define MSR_SPEED (1<<3) #define MSR_LINK (1<<2) /* USB endpoints */ enum rtl8150_usb_ep { RTL8150_USB_EP_CONTROL = 0, RTL8150_USB_EP_BULK_IN = 1, RTL8150_USB_EP_BULK_OUT = 2, RTL8150_USB_EP_INT_IN = 3, }; /* Interrupt pipe data */ #define INT_TSR 0x00 #define INT_RSR 0x01 #define INT_MSR 0x02 #define INT_WAKSR 0x03 #define INT_TXOK_CNT 0x04 #define INT_RXLOST_CNT 0x05 #define INT_CRERR_CNT 0x06 #define INT_COL_CNT 0x07 #define RTL8150_MTU 1540 #define RTL8150_TX_TIMEOUT (HZ) #define RX_SKB_POOL_SIZE 4 /* rtl8150 flags */ #define RTL8150_HW_CRC 0 #define RX_REG_SET 1 #define RTL8150_UNPLUG 2 #define RX_URB_FAIL 3 /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_MELCO 0x0411 #define VENDOR_ID_MICRONET 0x3980 #define VENDOR_ID_LONGSHINE 0x07b8 #define VENDOR_ID_OQO 0x1557 #define VENDOR_ID_ZYXEL 0x0586 #define PRODUCT_ID_RTL8150 0x8150 #define PRODUCT_ID_LUAKTX 0x0012 #define PRODUCT_ID_LCS8138TX 0x401a #define PRODUCT_ID_SP128AR 0x0003 #define PRODUCT_ID_PRESTIGE 0x401a #undef EEPROM_WRITE /* table of devices that work with this driver */ static const struct usb_device_id rtl8150_table[] = { {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)}, {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)}, {USB_DEVICE(VENDOR_ID_LONGSHINE, PRODUCT_ID_LCS8138TX)}, {USB_DEVICE(VENDOR_ID_OQO, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_ZYXEL, PRODUCT_ID_PRESTIGE)}, {} }; MODULE_DEVICE_TABLE(usb, rtl8150_table); struct rtl8150 { unsigned long flags; struct usb_device *udev; struct tasklet_struct tl; struct 
net_device *netdev; struct urb *rx_urb, *tx_urb, *intr_urb; struct sk_buff *tx_skb, *rx_skb; struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; spinlock_t rx_pool_lock; struct usb_ctrlrequest dr; int intr_interval; u8 *intr_buff; u8 phy; }; typedef struct rtl8150 rtl8150_t; struct async_req { struct usb_ctrlrequest dr; u16 rx_creg; }; static const char driver_name [] = "rtl8150"; /* ** ** device related part of the code ** */ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { return usb_control_msg_recv(dev->udev, 0, RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, indx, 0, data, size, 1000, GFP_NOIO); } static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) { return usb_control_msg_send(dev->udev, 0, RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, indx, 0, data, size, 1000, GFP_NOIO); } static void async_set_reg_cb(struct urb *urb) { struct async_req *req = (struct async_req *)urb->context; int status = urb->status; if (status < 0) dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status); kfree(req); usb_free_urb(urb); } static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg) { int res = -ENOMEM; struct urb *async_urb; struct async_req *req; req = kmalloc(sizeof(struct async_req), GFP_ATOMIC); if (req == NULL) return res; async_urb = usb_alloc_urb(0, GFP_ATOMIC); if (async_urb == NULL) { kfree(req); return res; } req->rx_creg = cpu_to_le16(reg); req->dr.bRequestType = RTL8150_REQT_WRITE; req->dr.bRequest = RTL8150_REQ_SET_REGS; req->dr.wIndex = 0; req->dr.wValue = cpu_to_le16(indx); req->dr.wLength = cpu_to_le16(size); usb_fill_control_urb(async_urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr, &req->rx_creg, size, async_set_reg_cb, req); res = usb_submit_urb(async_urb, GFP_ATOMIC); if (res) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res); } return res; } static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) { int i; u8 data[3], tmp; data[0] = phy; data[1] = data[2] = 0; tmp = indx | PHY_READ | PHY_GO; i = 0; set_registers(dev, PHYADD, sizeof(data), data); set_registers(dev, PHYCNT, 1, &tmp); do { get_registers(dev, PHYCNT, 1, data); } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); if (i <= MII_TIMEOUT) { get_registers(dev, PHYDAT, 2, data); *reg = data[0] | (data[1] << 8); return 0; } else return 1; } static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) { int i; u8 data[3], tmp; data[0] = phy; data[1] = reg & 0xff; data[2] = (reg >> 8) & 0xff; tmp = indx | PHY_WRITE | PHY_GO; i = 0; set_registers(dev, PHYADD, sizeof(data), data); set_registers(dev, PHYCNT, 1, &tmp); do { get_registers(dev, PHYCNT, 1, data); } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); if (i <= MII_TIMEOUT) return 0; else return 1; } static void set_ethernet_addr(rtl8150_t *dev) { u8 node_id[ETH_ALEN]; int ret; ret = get_registers(dev, IDR, sizeof(node_id), node_id); if (!ret) { eth_hw_addr_set(dev->netdev, node_id); } else { eth_hw_addr_random(dev->netdev); netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", dev->netdev->dev_addr); } } static int rtl8150_set_mac_address(struct net_device *netdev, void *p) { struct sockaddr *addr = p; rtl8150_t *dev = netdev_priv(netdev); if (netif_running(netdev)) return -EBUSY; eth_hw_addr_set(netdev, addr->sa_data); netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr); /* Set the IDR registers. 
*/ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); #ifdef EEPROM_WRITE { int i; u8 cr; /* Get the CR contents. */ get_registers(dev, CR, 1, &cr); /* Set the WEPROM bit (eeprom write enable). */ cr |= 0x20; set_registers(dev, CR, 1, &cr); /* Write the MAC address into eeprom. Eeprom writes must be word-sized, so we need to split them up. */ for (i = 0; i * 2 < netdev->addr_len; i++) { set_registers(dev, IDR_EEPROM + (i * 2), 2, netdev->dev_addr + (i * 2)); } /* Clear the WEPROM bit (preventing accidental eeprom writes). */ cr &= 0xdf; set_registers(dev, CR, 1, &cr); } #endif return 0; } static int rtl8150_reset(rtl8150_t * dev) { u8 data = 0x10; int i = HZ; set_registers(dev, CR, 1, &data); do { get_registers(dev, CR, 1, &data); } while ((data & 0x10) && --i); return (i > 0) ? 1 : 0; } static int alloc_all_urbs(rtl8150_t * dev) { dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->rx_urb) return 0; dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tx_urb) { usb_free_urb(dev->rx_urb); return 0; } dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->intr_urb) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); return 0; } return 1; } static void free_all_urbs(rtl8150_t * dev) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); } static void unlink_all_urbs(rtl8150_t * dev) { usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->intr_urb); } static inline struct sk_buff *pull_skb(rtl8150_t *dev) { struct sk_buff *skb; int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) { if (dev->rx_skb_pool[i]) { skb = dev->rx_skb_pool[i]; dev->rx_skb_pool[i] = NULL; return skb; } } return NULL; } static void read_bulk_callback(struct urb *urb) { rtl8150_t *dev; unsigned pkt_len, res; struct sk_buff *skb; struct net_device *netdev; int status = urb->status; int result; unsigned long flags; dev = urb->context; if (!dev) return; if (test_bit(RTL8150_UNPLUG, &dev->flags)) return; netdev = dev->netdev; if (!netif_device_present(netdev)) return; switch (status) { case 0: break; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "may be reset is needed?..\n"); goto goon; default: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "Rx status %d\n", status); goto goon; } if (!dev->rx_skb) goto resched; /* protect against short packets (tell me why we got some?!?) 
*/ if (urb->actual_length < 4) goto goon; res = urb->actual_length; pkt_len = res - 4; skb_put(dev->rx_skb, pkt_len); dev->rx_skb->protocol = eth_type_trans(dev->rx_skb, netdev); netif_rx(dev->rx_skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched; dev->rx_skb = skb; goon: usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); result = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); if (result == -ENODEV) netif_device_detach(dev->netdev); else if (result) { set_bit(RX_URB_FAIL, &dev->flags); goto resched; } else { clear_bit(RX_URB_FAIL, &dev->flags); } return; resched: tasklet_schedule(&dev->tl); } static void write_bulk_callback(struct urb *urb) { rtl8150_t *dev; int status = urb->status; dev = urb->context; if (!dev) return; dev_kfree_skb_irq(dev->tx_skb); if (!netif_device_present(dev->netdev)) return; if (status) dev_info(&urb->dev->dev, "%s: Tx status %d\n", dev->netdev->name, status); netif_trans_update(dev->netdev); netif_wake_queue(dev->netdev); } static void intr_callback(struct urb *urb) { rtl8150_t *dev; __u8 *d; int status = urb->status; int res; dev = urb->context; if (!dev) return; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: dev_info(&urb->dev->dev, "%s: intr status %d\n", dev->netdev->name, status); goto resubmit; } d = urb->transfer_buffer; if (d[0] & TSR_ERRORS) { dev->netdev->stats.tx_errors++; if (d[INT_TSR] & (TSR_ECOL | TSR_JBR)) dev->netdev->stats.tx_aborted_errors++; if (d[INT_TSR] & TSR_LCOL) dev->netdev->stats.tx_window_errors++; if (d[INT_TSR] & TSR_LOSS_CRS) dev->netdev->stats.tx_carrier_errors++; } /* Report link status changes to the network stack */ if ((d[INT_MSR] & MSR_LINK) == 0) { if (netif_carrier_ok(dev->netdev)) { netif_carrier_off(dev->netdev); netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__); } } else { if (!netif_carrier_ok(dev->netdev)) { netif_carrier_on(dev->netdev); netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__); } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res == -ENODEV) netif_device_detach(dev->netdev); else if (res) dev_err(&dev->udev->dev, "can't resubmit intr, %s-%s/input0, status %d\n", dev->udev->bus->bus_name, dev->udev->devpath, res); } static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message) { rtl8150_t *dev = usb_get_intfdata(intf); netif_device_detach(dev->netdev); if (netif_running(dev->netdev)) { usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->intr_urb); } return 0; } static int rtl8150_resume(struct usb_interface *intf) { rtl8150_t *dev = usb_get_intfdata(intf); netif_device_attach(dev->netdev); if (netif_running(dev->netdev)) { dev->rx_urb->status = 0; dev->rx_urb->actual_length = 0; read_bulk_callback(dev->rx_urb); dev->intr_urb->status = 0; dev->intr_urb->actual_length = 0; intr_callback(dev->intr_urb); } return 0; } /* ** ** network related part of the code ** */ static void fill_skb_pool(rtl8150_t *dev) { struct sk_buff *skb; int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) { if (dev->rx_skb_pool[i]) continue; skb = dev_alloc_skb(RTL8150_MTU + 2); if (!skb) { return; } skb_reserve(skb, 2); dev->rx_skb_pool[i] = skb; } } static void free_skb_pool(rtl8150_t *dev) { int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) dev_kfree_skb(dev->rx_skb_pool[i]); } 
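The receive path above keeps a small fixed array of preallocated buffers: fill_skb_pool() tops up any empty slots (reserving 2 bytes so the IP header ends up 4-byte aligned after the 14-byte Ethernet header), and pull_skb() hands out the first available one. A minimal host-side sketch of the same pull/refill pattern, using plain malloc'd buffers instead of sk_buffs:

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4          /* mirrors RX_SKB_POOL_SIZE */
#define BUF_SIZE  (1540 + 2) /* mirrors RTL8150_MTU plus the 2-byte reserve */

static void *pool[POOL_SIZE];

/* top up empty slots, like fill_skb_pool() */
static void fill_pool(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (pool[i])
			continue;
		pool[i] = malloc(BUF_SIZE);
		if (!pool[i])
			return; /* partial fill is fine; refilled on a later pass */
	}
}

/* hand out the first available buffer, like pull_skb() */
static void *pull_buf(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (pool[i]) {
			void *b = pool[i];
			pool[i] = NULL;
			return b;
		}
	}
	return NULL; /* caller reschedules and retries, as rx_fixup() does */
}

int main(void)
{
	void *rx;
	int i;

	fill_pool();
	rx = pull_buf();
	printf("got buffer %p, refilling pool\n", rx);
	fill_pool(); /* replace the slot that was just consumed */
	free(rx);
	for (i = 0; i < POOL_SIZE; i++)
		free(pool[i]);
	return 0;
}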
static void rx_fixup(struct tasklet_struct *t) { struct rtl8150 *dev = from_tasklet(dev, t, tl); struct sk_buff *skb; int status; spin_lock_irq(&dev->rx_pool_lock); fill_skb_pool(dev); spin_unlock_irq(&dev->rx_pool_lock); if (test_bit(RX_URB_FAIL, &dev->flags)) if (dev->rx_skb) goto try_again; spin_lock_irq(&dev->rx_pool_lock); skb = pull_skb(dev); spin_unlock_irq(&dev->rx_pool_lock); if (skb == NULL) goto tlsched; dev->rx_skb = skb; usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); try_again: status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); if (status == -ENODEV) { netif_device_detach(dev->netdev); } else if (status) { set_bit(RX_URB_FAIL, &dev->flags); goto tlsched; } else { clear_bit(RX_URB_FAIL, &dev->flags); } return; tlsched: tasklet_schedule(&dev->tl); } static int enable_net_traffic(rtl8150_t * dev) { u8 cr, tcr, rcr, msr; if (!rtl8150_reset(dev)) { dev_warn(&dev->udev->dev, "device reset failed\n"); } /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ rcr = 0x9e; tcr = 0xd8; cr = 0x0c; if (!(rcr & 0x80)) set_bit(RTL8150_HW_CRC, &dev->flags); set_registers(dev, RCR, 1, &rcr); set_registers(dev, TCR, 1, &tcr); set_registers(dev, CR, 1, &cr); get_registers(dev, MSR, 1, &msr); return 0; } static void disable_net_traffic(rtl8150_t * dev) { u8 cr; get_registers(dev, CR, 1, &cr); cr &= 0xf3; set_registers(dev, CR, 1, &cr); } static void rtl8150_tx_timeout(struct net_device *netdev, unsigned int txqueue) { rtl8150_t *dev = netdev_priv(netdev); dev_warn(&netdev->dev, "Tx timeout.\n"); usb_unlink_urb(dev->tx_urb); netdev->stats.tx_errors++; } static void rtl8150_set_multicast(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); u16 rx_creg = 0x9e; netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { rx_creg |= 0x0001; dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { rx_creg &= 0xfffe; rx_creg |= 0x0002; dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ rx_creg &= 0x00fc; } async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); netif_wake_queue(netdev); } static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int count, res; netif_stop_queue(netdev); count = (skb->len < 60) ? 60 : skb->len; count = (count & 0x3f) ? count : count + 1; dev->tx_skb = skb; usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), skb->data, count, write_bulk_callback, dev); if ((res = usb_submit_urb(dev->tx_urb, GFP_ATOMIC))) { /* Can we get/handle EPIPE here? 
*/ if (res == -ENODEV) netif_device_detach(dev->netdev); else { dev_warn(&netdev->dev, "failed tx_urb %d\n", res); netdev->stats.tx_errors++; netif_start_queue(netdev); } } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; netif_trans_update(netdev); } return NETDEV_TX_OK; } static void set_carrier(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); short tmp; get_registers(dev, CSCR, 2, &tmp); if (tmp & CSCR_LINK_STATUS) netif_carrier_on(netdev); else netif_carrier_off(netdev); } static int rtl8150_open(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int res; if (dev->rx_skb == NULL) dev->rx_skb = pull_skb(dev); if (!dev->rx_skb) return -ENOMEM; set_registers(dev, IDR, 6, netdev->dev_addr); usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_warn(&netdev->dev, "rx_urb submit failed: %d\n", res); return res; } usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3), dev->intr_buff, INTBUFSIZE, intr_callback, dev, dev->intr_interval); if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_warn(&netdev->dev, "intr_urb submit failed: %d\n", res); usb_kill_urb(dev->rx_urb); return res; } enable_net_traffic(dev); set_carrier(netdev); netif_start_queue(netdev); return res; } static int rtl8150_close(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); netif_stop_queue(netdev); if (!test_bit(RTL8150_UNPLUG, &dev->flags)) disable_net_traffic(dev); unlink_all_urbs(dev); return 0; } static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { rtl8150_t *dev = netdev_priv(netdev); strscpy(info->driver, driver_name, sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } static int rtl8150_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ecmd) { rtl8150_t *dev = netdev_priv(netdev); short lpa = 0; short bmcr = 0; u32 supported; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ecmd->base.port = PORT_TP; ecmd->base.phy_address = dev->phy; get_registers(dev, BMCR, 2, &bmcr); get_registers(dev, ANLP, 2, &lpa); if (bmcr & BMCR_ANENABLE) { u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); ecmd->base.speed = speed; ecmd->base.autoneg = AUTONEG_ENABLE; if (speed == SPEED_100) ecmd->base.duplex = (lpa & LPA_100FULL) ? DUPLEX_FULL : DUPLEX_HALF; else ecmd->base.duplex = (lpa & LPA_10FULL) ? DUPLEX_FULL : DUPLEX_HALF; } else { ecmd->base.autoneg = AUTONEG_DISABLE; ecmd->base.speed = ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10); ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; } ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, supported); return 0; } static const struct ethtool_ops ops = { .get_drvinfo = rtl8150_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = rtl8150_get_link_ksettings, }; static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq, void __user *udata, int cmd) { rtl8150_t *dev = netdev_priv(netdev); u16 *data = (u16 *) & rq->ifr_ifru; int res = 0; switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; fallthrough; case SIOCDEVPRIVATE + 1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; case SIOCDEVPRIVATE + 2: if (!capable(CAP_NET_ADMIN)) return -EPERM; write_mii_word(dev, dev->phy, (data[1] & 0x1f), data[2]); break; default: res = -EOPNOTSUPP; } return res; } static const struct net_device_ops rtl8150_netdev_ops = { .ndo_open = rtl8150_open, .ndo_stop = rtl8150_close, .ndo_siocdevprivate = rtl8150_siocdevprivate, .ndo_start_xmit = rtl8150_start_xmit, .ndo_tx_timeout = rtl8150_tx_timeout, .ndo_set_rx_mode = rtl8150_set_multicast, .ndo_set_mac_address = rtl8150_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; static int rtl8150_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); rtl8150_t *dev; struct net_device *netdev; static const u8 bulk_ep_addr[] = { RTL8150_USB_EP_BULK_IN | USB_DIR_IN, RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT, 0}; static const u8 int_ep_addr[] = { RTL8150_USB_EP_INT_IN | USB_DIR_IN, 0}; netdev = alloc_etherdev(sizeof(rtl8150_t)); if (!netdev) return -ENOMEM; dev = netdev_priv(netdev); dev->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!dev->intr_buff) { free_netdev(netdev); return -ENOMEM; } /* Verify that all required endpoints are present */ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || !usb_check_int_endpoints(intf, int_ep_addr)) { dev_err(&intf->dev, "couldn't find required endpoints\n"); goto out; } tasklet_setup(&dev->tl, rx_fixup); spin_lock_init(&dev->rx_pool_lock); dev->udev = udev; dev->netdev = netdev; netdev->netdev_ops = &rtl8150_netdev_ops; netdev->watchdog_timeo = RTL8150_TX_TIMEOUT; netdev->ethtool_ops = &ops; dev->intr_interval = 100; /* 100ms */ if (!alloc_all_urbs(dev)) { dev_err(&intf->dev, "out of memory\n"); goto out; } if (!rtl8150_reset(dev)) { dev_err(&intf->dev, "couldn't reset the device\n"); goto out1; } fill_skb_pool(dev); set_ethernet_addr(dev); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); if (register_netdev(netdev) != 0) { dev_err(&intf->dev, "couldn't register the device\n"); goto out2; } dev_info(&intf->dev, "%s: rtl8150 is detected\n", netdev->name); return 0; out2: usb_set_intfdata(intf, NULL); free_skb_pool(dev); out1: free_all_urbs(dev); out: kfree(dev->intr_buff); free_netdev(netdev); return -EIO; } static void rtl8150_disconnect(struct usb_interface *intf) { rtl8150_t *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (dev) { set_bit(RTL8150_UNPLUG, &dev->flags); tasklet_kill(&dev->tl); unregister_netdev(dev->netdev); unlink_all_urbs(dev); free_all_urbs(dev); free_skb_pool(dev); dev_kfree_skb(dev->rx_skb); kfree(dev->intr_buff); free_netdev(dev->netdev); } } static struct usb_driver rtl8150_driver = { .name = driver_name, .probe = rtl8150_probe, .disconnect = rtl8150_disconnect, .id_table = rtl8150_table, .suspend = rtl8150_suspend, .resume = rtl8150_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rtl8150_driver); MODULE_AUTHOR(DRIVER_AUTHOR); 
MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
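/*
 * A minimal sketch (not from the driver) restating the Tx length rule used in
 * rtl8150_start_xmit() above: pad short frames up to the 60-byte Ethernet
 * minimum, and add one byte whenever the length is an exact multiple of 64,
 * presumably so the bulk-out transfer never ends exactly on a max-packet
 * boundary (which would otherwise require a zero-length packet). The function
 * name is hypothetical.
 */
static unsigned int example_tx_length(unsigned int skb_len)
{
	unsigned int count;

	count = (skb_len < 60) ? 60 : skb_len;	/* minimum Ethernet frame size */
	if ((count & 0x3f) == 0)		/* exact multiple of 64-byte packets */
		count++;
	return count;
}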
// SPDX-License-Identifier: GPL-2.0 /* * Native support for the I/O-Warrior USB devices * * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH * written by Christian Lucht <lucht@codemercs.com> and * Christoph Jung <jung@codemercs.com> * * based on * usb-skeleton.c by Greg Kroah-Hartman <greg@kroah.com> * brlvger.c by Stephane Dalton <sdalton@videotron.ca> * and Stephane Doyon <s.doyon@videotron.ca> * * Released under the GPLv2. */ #include <linux/module.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/usb/iowarrior.h> #define DRIVER_AUTHOR "Christian Lucht <lucht@codemercs.com>" #define DRIVER_DESC "USB IO-Warrior driver" #define USB_VENDOR_ID_CODEMERCS 1984 /* low speed iowarrior */ #define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500 #define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501 #define USB_DEVICE_ID_CODEMERCS_IOWPV1 0x1511 #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512 /* full speed iowarrior */ #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503 /* fuller speed iowarrior */ #define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504 #define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505 #define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506 /* OEMed devices */ #define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a #define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b /* Get a minor range for your devices from the usb maintainer */ #ifdef CONFIG_USB_DYNAMIC_MINORS #define IOWARRIOR_MINOR_BASE 0 #else #define IOWARRIOR_MINOR_BASE 208 // SKELETON_MINOR_BASE 192 + 16, not official yet #endif /* interrupt input queue size */ #define MAX_INTERRUPT_BUFFER 16 /* maximum number of urbs that are submitted for writes at the same time, this applies to the IOWarrior56 only! IOWarrior24 and IOWarrior40 use synchronous usb_control_msg calls.
*/ #define MAX_WRITES_IN_FLIGHT 4 MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static struct usb_driver iowarrior_driver; /*--------------*/ /* data */ /*--------------*/ /* Structure to hold all of our device specific stuff */ struct iowarrior { struct mutex mutex; /* locks this structure */ struct usb_device *udev; /* save off the usb device pointer */ struct usb_interface *interface; /* the interface for this device */ unsigned char minor; /* the starting minor number for this device */ struct usb_endpoint_descriptor *int_out_endpoint; /* endpoint for reading (needed for IOW56 only) */ struct usb_endpoint_descriptor *int_in_endpoint; /* endpoint for reading */ struct urb *int_in_urb; /* the urb for reading data */ unsigned char *int_in_buffer; /* buffer for data to be read */ unsigned char serial_number; /* to detect lost packages */ unsigned char *read_queue; /* size is MAX_INTERRUPT_BUFFER * packet size */ wait_queue_head_t read_wait; wait_queue_head_t write_wait; /* wait-queue for writing to the device */ atomic_t write_busy; /* number of write-urbs submitted */ atomic_t read_idx; atomic_t intr_idx; atomic_t overflow_flag; /* signals an index 'rollover' */ int present; /* this is 1 as long as the device is connected */ int opened; /* this is 1 if the device is currently open */ char chip_serial[9]; /* the serial number string of the chip connected */ int report_size; /* number of bytes in a report */ u16 product_id; struct usb_anchor submitted; }; /*--------------*/ /* globals */ /*--------------*/ #define USB_REQ_GET_REPORT 0x01 //#if 0 static int usb_get_report(struct usb_device *dev, struct usb_host_interface *inter, unsigned char type, unsigned char id, void *buf, int size) { return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_REPORT, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, (type << 8) + id, inter->desc.bInterfaceNumber, buf, size, USB_CTRL_GET_TIMEOUT); } //#endif #define USB_REQ_SET_REPORT 0x09 static int usb_set_report(struct usb_interface *intf, unsigned char type, unsigned char id, void *buf, int size) { return usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), USB_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE, (type << 8) + id, intf->cur_altsetting->desc.bInterfaceNumber, buf, size, 1000); } /*---------------------*/ /* driver registration */ /*---------------------*/ /* table of devices that work with this driver */ static const struct usb_device_id iowarrior_ids[] = { {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, iowarrior_ids); /* * USB callback handler for reading data */ static void iowarrior_callback(struct urb *urb) { struct iowarrior *dev = urb->context; int intr_idx; int read_idx; int aux_idx; int offset; int status = urb->status; int retval; switch 
(status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto exit; } intr_idx = atomic_read(&dev->intr_idx); /* aux_idx become previous intr_idx */ aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1); read_idx = atomic_read(&dev->read_idx); /* queue is not empty and it's interface 0 */ if ((intr_idx != read_idx) && (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0)) { /* + 1 for serial number */ offset = aux_idx * (dev->report_size + 1); if (!memcmp (dev->read_queue + offset, urb->transfer_buffer, dev->report_size)) { /* equal values on interface 0 will be ignored */ goto exit; } } /* aux_idx become next intr_idx */ aux_idx = (intr_idx == (MAX_INTERRUPT_BUFFER - 1)) ? 0 : (intr_idx + 1); if (read_idx == aux_idx) { /* queue full, dropping oldest input */ read_idx = (++read_idx == MAX_INTERRUPT_BUFFER) ? 0 : read_idx; atomic_set(&dev->read_idx, read_idx); atomic_set(&dev->overflow_flag, 1); } /* +1 for serial number */ offset = intr_idx * (dev->report_size + 1); memcpy(dev->read_queue + offset, urb->transfer_buffer, dev->report_size); *(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++; atomic_set(&dev->intr_idx, aux_idx); /* tell the blocking read about the new data */ wake_up_interruptible(&dev->read_wait); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } /* * USB Callback handler for write-ops */ static void iowarrior_write_callback(struct urb *urb) { struct iowarrior *dev; int status = urb->status; dev = urb->context; /* sync/async unlink faults aren't errors */ if (status && !(status == -ENOENT || status == -ECONNRESET || status == -ESHUTDOWN)) { dev_dbg(&dev->interface->dev, "nonzero write bulk status received: %d\n", status); } /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); /* tell a waiting writer the interrupt-out-pipe is available again */ atomic_dec(&dev->write_busy); wake_up_interruptible(&dev->write_wait); } /* * iowarrior_delete */ static inline void iowarrior_delete(struct iowarrior *dev) { dev_dbg(&dev->interface->dev, "minor %d\n", dev->minor); kfree(dev->int_in_buffer); usb_free_urb(dev->int_in_urb); kfree(dev->read_queue); usb_put_intf(dev->interface); kfree(dev); } /*---------------------*/ /* fops implementation */ /*---------------------*/ static int read_index(struct iowarrior *dev) { int intr_idx, read_idx; read_idx = atomic_read(&dev->read_idx); intr_idx = atomic_read(&dev->intr_idx); return (read_idx == intr_idx ? 
-1 : read_idx); } /* * iowarrior_read */ static ssize_t iowarrior_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct iowarrior *dev; int read_idx; int offset; int retval; dev = file->private_data; if (file->f_flags & O_NONBLOCK) { retval = mutex_trylock(&dev->mutex); if (!retval) return -EAGAIN; } else { retval = mutex_lock_interruptible(&dev->mutex); if (retval) return -ERESTARTSYS; } /* verify that the device wasn't unplugged */ if (!dev->present) { retval = -ENODEV; goto exit; } dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n", dev->minor, count); /* read count must be packet size (+ time stamp) */ if ((count != dev->report_size) && (count != (dev->report_size + 1))) { retval = -EINVAL; goto exit; } /* repeat until no buffer overrun in callback handler occur */ do { atomic_set(&dev->overflow_flag, 0); if ((read_idx = read_index(dev)) == -1) { /* queue empty */ if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto exit; } else { //next line will return when there is either new data, or the device is unplugged int r = wait_event_interruptible(dev->read_wait, (!dev->present || (read_idx = read_index (dev)) != -1)); if (r) { //we were interrupted by a signal retval = -ERESTART; goto exit; } if (!dev->present) { //The device was unplugged retval = -ENODEV; goto exit; } if (read_idx == -1) { // Can this happen ??? retval = 0; goto exit; } } } offset = read_idx * (dev->report_size + 1); if (copy_to_user(buffer, dev->read_queue + offset, count)) { retval = -EFAULT; goto exit; } } while (atomic_read(&dev->overflow_flag)); read_idx = ++read_idx == MAX_INTERRUPT_BUFFER ? 0 : read_idx; atomic_set(&dev->read_idx, read_idx); mutex_unlock(&dev->mutex); return count; exit: mutex_unlock(&dev->mutex); return retval; } /* * iowarrior_write */ static ssize_t iowarrior_write(struct file *file, const char __user *user_buffer, size_t count, loff_t *ppos) { struct iowarrior *dev; int retval = 0; char *buf = NULL; /* for IOW24 and IOW56 we need a buffer */ struct urb *int_out_urb = NULL; dev = file->private_data; mutex_lock(&dev->mutex); /* verify that the device wasn't unplugged */ if (!dev->present) { retval = -ENODEV; goto exit; } dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n", dev->minor, count); /* if count is 0 we're already done */ if (count == 0) { retval = 0; goto exit; } /* We only accept full reports */ if (count != dev->report_size) { retval = -EINVAL; goto exit; } switch (dev->product_id) { case USB_DEVICE_ID_CODEMERCS_IOW24: case USB_DEVICE_ID_CODEMERCS_IOW24SAG: case USB_DEVICE_ID_CODEMERCS_IOWPV1: case USB_DEVICE_ID_CODEMERCS_IOWPV2: case USB_DEVICE_ID_CODEMERCS_IOW40: /* IOW24 and IOW40 use a synchronous call */ buf = memdup_user(user_buffer, count); if (IS_ERR(buf)) { retval = PTR_ERR(buf); goto exit; } retval = usb_set_report(dev->interface, 2, 0, buf, count); kfree(buf); goto exit; case USB_DEVICE_ID_CODEMERCS_IOW56: case USB_DEVICE_ID_CODEMERCS_IOW56AM: case USB_DEVICE_ID_CODEMERCS_IOW28: case USB_DEVICE_ID_CODEMERCS_IOW28L: case USB_DEVICE_ID_CODEMERCS_IOW100: /* The IOW56 uses asynchronous IO and more urbs */ if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) { /* Wait until we are below the limit for submitted urbs */ if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto exit; } else { retval = wait_event_interruptible(dev->write_wait, (!dev->present || (atomic_read (&dev-> write_busy) < MAX_WRITES_IN_FLIGHT))); if (retval) { /* we were interrupted by a signal */ retval = -ERESTART; goto exit; } if (!dev->present) { /* The 
device was unplugged */ retval = -ENODEV; goto exit; } if (!dev->opened) { /* We were closed while waiting for an URB */ retval = -ENODEV; goto exit; } } } atomic_inc(&dev->write_busy); int_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!int_out_urb) { retval = -ENOMEM; goto error_no_urb; } buf = usb_alloc_coherent(dev->udev, dev->report_size, GFP_KERNEL, &int_out_urb->transfer_dma); if (!buf) { retval = -ENOMEM; dev_dbg(&dev->interface->dev, "Unable to allocate buffer\n"); goto error_no_buffer; } usb_fill_int_urb(int_out_urb, dev->udev, usb_sndintpipe(dev->udev, dev->int_out_endpoint->bEndpointAddress), buf, dev->report_size, iowarrior_write_callback, dev, dev->int_out_endpoint->bInterval); int_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (copy_from_user(buf, user_buffer, count)) { retval = -EFAULT; goto error; } usb_anchor_urb(int_out_urb, &dev->submitted); retval = usb_submit_urb(int_out_urb, GFP_KERNEL); if (retval) { dev_dbg(&dev->interface->dev, "submit error %d for urb nr.%d\n", retval, atomic_read(&dev->write_busy)); usb_unanchor_urb(int_out_urb); goto error; } /* submit was ok */ retval = count; usb_free_urb(int_out_urb); goto exit; default: /* what do we have here ? An unsupported Product-ID ? */ dev_err(&dev->interface->dev, "%s - not supported for product=0x%x\n", __func__, dev->product_id); retval = -EFAULT; goto exit; } error: usb_free_coherent(dev->udev, dev->report_size, buf, int_out_urb->transfer_dma); error_no_buffer: usb_free_urb(int_out_urb); error_no_urb: atomic_dec(&dev->write_busy); wake_up_interruptible(&dev->write_wait); exit: mutex_unlock(&dev->mutex); return retval; } /* * iowarrior_ioctl */ static long iowarrior_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct iowarrior *dev = NULL; __u8 *buffer; __u8 __user *user_buffer; int retval; int io_res; /* checks for bytes read/written and copy_to/from_user results */ dev = file->private_data; if (!dev) return -ENODEV; buffer = kzalloc(dev->report_size, GFP_KERNEL); if (!buffer) return -ENOMEM; mutex_lock(&dev->mutex); /* verify that the device wasn't unplugged */ if (!dev->present) { retval = -ENODEV; goto error_out; } dev_dbg(&dev->interface->dev, "minor %d, cmd 0x%.4x, arg %ld\n", dev->minor, cmd, arg); retval = 0; switch (cmd) { case IOW_WRITE: if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 || dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) { user_buffer = (__u8 __user *)arg; io_res = copy_from_user(buffer, user_buffer, dev->report_size); if (io_res) { retval = -EFAULT; } else { io_res = usb_set_report(dev->interface, 2, 0, buffer, dev->report_size); if (io_res < 0) retval = io_res; } } else { retval = -EINVAL; dev_err(&dev->interface->dev, "ioctl 'IOW_WRITE' is not supported for product=0x%x.\n", dev->product_id); } break; case IOW_READ: user_buffer = (__u8 __user *)arg; io_res = usb_get_report(dev->udev, dev->interface->cur_altsetting, 1, 0, buffer, dev->report_size); if (io_res < 0) retval = io_res; else { io_res = copy_to_user(user_buffer, buffer, dev->report_size); if (io_res) retval = -EFAULT; } break; case IOW_GETINFO: { /* Report available information for the device */ struct iowarrior_info info; /* needed for power consumption */ struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; memset(&info, 0, sizeof(info)); /* directly from the descriptor */ info.vendor = 
le16_to_cpu(dev->udev->descriptor.idVendor); info.product = dev->product_id; info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ info.speed = dev->udev->speed; info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; info.report_size = dev->report_size; /* serial number string has been read earlier 8 chars or empty string */ memcpy(info.serial, dev->chip_serial, sizeof(dev->chip_serial)); if (cfg_descriptor == NULL) { info.power = -1; /* no information available */ } else { /* the MaxPower is stored in units of 2mA to make it fit into a byte-value */ info.power = cfg_descriptor->bMaxPower * 2; } io_res = copy_to_user((struct iowarrior_info __user *)arg, &info, sizeof(struct iowarrior_info)); if (io_res) retval = -EFAULT; break; } default: /* return that we did not understand this ioctl call */ retval = -ENOTTY; break; } error_out: /* unlock the device */ mutex_unlock(&dev->mutex); kfree(buffer); return retval; } /* * iowarrior_open */ static int iowarrior_open(struct inode *inode, struct file *file) { struct iowarrior *dev = NULL; struct usb_interface *interface; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&iowarrior_driver, subminor); if (!interface) { pr_err("%s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } dev = usb_get_intfdata(interface); if (!dev) return -ENODEV; mutex_lock(&dev->mutex); /* Only one process can open each device, no sharing. */ if (dev->opened) { retval = -EBUSY; goto out; } /* setup interrupt handler for receiving values */ if ((retval = usb_submit_urb(dev->int_in_urb, GFP_KERNEL)) < 0) { dev_err(&interface->dev, "Error %d while submitting URB\n", retval); retval = -EFAULT; goto out; } /* increment our usage count for the driver */ ++dev->opened; /* save our object in the file's private structure */ file->private_data = dev; retval = 0; out: mutex_unlock(&dev->mutex); return retval; } /* * iowarrior_release */ static int iowarrior_release(struct inode *inode, struct file *file) { struct iowarrior *dev; int retval = 0; dev = file->private_data; if (!dev) return -ENODEV; dev_dbg(&dev->interface->dev, "minor %d\n", dev->minor); /* lock our device */ mutex_lock(&dev->mutex); if (dev->opened <= 0) { retval = -ENODEV; /* close called more than once */ mutex_unlock(&dev->mutex); } else { dev->opened = 0; /* we're closing now */ retval = 0; if (dev->present) { /* The device is still connected so we only shutdown pending read-/write-ops. */ usb_kill_urb(dev->int_in_urb); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); mutex_unlock(&dev->mutex); } else { /* The device was unplugged, cleanup resources */ mutex_unlock(&dev->mutex); iowarrior_delete(dev); } } return retval; } static __poll_t iowarrior_poll(struct file *file, poll_table * wait) { struct iowarrior *dev = file->private_data; __poll_t mask = 0; if (!dev->present) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); poll_wait(file, &dev->write_wait, wait); if (!dev->present) return EPOLLERR | EPOLLHUP; if (read_index(dev) != -1) mask |= EPOLLIN | EPOLLRDNORM; if (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } /* * File operations needed when we register this driver. * This assumes that this driver NEEDS file operations, * of course, which means that the driver is expected * to have a node in the /dev directory. 
If the USB * device were for a network interface then the driver * would use "struct net_driver" instead, and a serial * device would use "struct tty_driver". */ static const struct file_operations iowarrior_fops = { .owner = THIS_MODULE, .write = iowarrior_write, .read = iowarrior_read, .unlocked_ioctl = iowarrior_ioctl, .open = iowarrior_open, .release = iowarrior_release, .poll = iowarrior_poll, .llseek = noop_llseek, }; static char *iowarrior_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev)); } /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with devfs and the driver core */ static struct usb_class_driver iowarrior_class = { .name = "iowarrior%d", .devnode = iowarrior_devnode, .fops = &iowarrior_fops, .minor_base = IOWARRIOR_MINOR_BASE, }; /*---------------------------------*/ /* probe and disconnect functions */ /*---------------------------------*/ /* * iowarrior_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. */ static int iowarrior_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct iowarrior *dev = NULL; struct usb_host_interface *iface_desc; int retval = -ENOMEM; int res; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL); if (!dev) return retval; mutex_init(&dev->mutex); atomic_set(&dev->intr_idx, 0); atomic_set(&dev->read_idx, 0); atomic_set(&dev->overflow_flag, 0); init_waitqueue_head(&dev->read_wait); atomic_set(&dev->write_busy, 0); init_waitqueue_head(&dev->write_wait); dev->udev = udev; dev->interface = usb_get_intf(interface); iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); init_usb_anchor(&dev->submitted); res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint); if (res) { dev_err(&interface->dev, "no interrupt-in endpoint found\n"); retval = res; goto error; } if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) { res = usb_find_last_int_out_endpoint(iface_desc, &dev->int_out_endpoint); if (res) { dev_err(&interface->dev, "no interrupt-out endpoint found\n"); retval = res; goto error; } } /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); /* * Some devices need the report size to be different than the * endpoint size. 
*/ if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { switch (dev->product_id) { case USB_DEVICE_ID_CODEMERCS_IOW56: case USB_DEVICE_ID_CODEMERCS_IOW56AM: dev->report_size = 7; break; case USB_DEVICE_ID_CODEMERCS_IOW28: case USB_DEVICE_ID_CODEMERCS_IOW28L: dev->report_size = 4; break; case USB_DEVICE_ID_CODEMERCS_IOW100: dev->report_size = 12; break; } } /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->int_in_urb) goto error; dev->int_in_buffer = kmalloc(dev->report_size, GFP_KERNEL); if (!dev->int_in_buffer) goto error; usb_fill_int_urb(dev->int_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->int_in_endpoint->bEndpointAddress), dev->int_in_buffer, dev->report_size, iowarrior_callback, dev, dev->int_in_endpoint->bInterval); /* create an internal buffer for interrupt data from the device */ dev->read_queue = kmalloc_array(dev->report_size + 1, MAX_INTERRUPT_BUFFER, GFP_KERNEL); if (!dev->read_queue) goto error; /* Get the serial-number of the chip */ memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial)); usb_string(udev, udev->descriptor.iSerialNumber, dev->chip_serial, sizeof(dev->chip_serial)); if (strlen(dev->chip_serial) != 8) memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial)); /* Set the idle timeout to 0, if this is interface 0 */ if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0A, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* allow device read and ioctl */ dev->present = 1; /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &iowarrior_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "IOWarrior product=0x%x, serial=%s interface=%d " "now attached to iowarrior%d\n", dev->product_id, dev->chip_serial, iface_desc->desc.bInterfaceNumber, dev->minor - IOWARRIOR_MINOR_BASE); return retval; error: iowarrior_delete(dev); return retval; } /* * iowarrior_disconnect * * Called by the usb core when the device is removed from the system. */ static void iowarrior_disconnect(struct usb_interface *interface) { struct iowarrior *dev = usb_get_intfdata(interface); usb_deregister_dev(interface, &iowarrior_class); mutex_lock(&dev->mutex); /* prevent device read, write and ioctl */ dev->present = 0; if (dev->opened) { /* There is a process that holds a filedescriptor to the device , so we only shutdown read-/write-ops going on. Deleting the device is postponed until close() was called. */ usb_kill_urb(dev->int_in_urb); usb_kill_anchored_urbs(&dev->submitted); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); mutex_unlock(&dev->mutex); } else { /* no process is using the device, cleanup now */ mutex_unlock(&dev->mutex); iowarrior_delete(dev); } } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver iowarrior_driver = { .name = "iowarrior", .probe = iowarrior_probe, .disconnect = iowarrior_disconnect, .id_table = iowarrior_ids, }; module_usb_driver(iowarrior_driver); |
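/*
 * A minimal user-space sketch (not part of the driver) showing how the
 * character device registered above might be queried through the IOW_GETINFO
 * ioctl handled in iowarrior_ioctl(). The node path follows
 * iowarrior_devnode() ("usb/%s"); the device number may differ on a given
 * system. Field names mirror the struct iowarrior_info usage in the handler.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/iowarrior.h>

int main(void)
{
	struct iowarrior_info info;
	int fd = open("/dev/usb/iowarrior0", O_RDWR);	/* adjust node number as needed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOW_GETINFO, &info) == 0)
		printf("product=0x%x serial=%s report_size=%u\n",
		       info.product, (const char *)info.serial, info.report_size);
	close(fd);
	return 0;
}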
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SIMD_H
#define _ASM_SIMD_H

#include <asm/fpu/api.h>
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/*
 * may_use_simd - whether it is allowable at this time to issue SIMD
 * instructions or access the SIMD register file
 */
static __must_check inline bool may_use_simd(void)
{
	return irq_fpu_usable();
}

#endif /* _ASM_SIMD_H */ |
5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 
5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 
// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
				   const void *param, struct sock
*sk) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) return NULL; hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); hdr->opcode = cpu_to_le16(opcode); hdr->plen = plen; if (plen) skb_put_data(skb, param, plen); bt_dev_dbg(hdev, "skb len %d", skb->len); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_skb_opcode(skb) = opcode; /* Grab a reference if command needs to be associated with a sock (e.g. * likely mgmt socket that initiated the command). */ if (sk) { hci_skb_sk(skb) = sk; sock_hold(sk); } return skb; } static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, const void *param, u8 event, struct sock *sk) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. */ if (req->err) return; skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); req->err = -ENOMEM; return; } if (skb_queue_empty(&req->cmd_q)) bt_cb(skb)->hci.req_flags |= HCI_REQ_START; hci_skb_event(skb) = event; skb_queue_tail(&req->cmd_q, skb); } static int hci_req_sync_run(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; unsigned long flags; bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. */ if (req->err) { skb_queue_purge(&req->cmd_q); return req->err; } /* Do not allow empty requests */ if (skb_queue_empty(&req->cmd_q)) return -ENODATA; skb = skb_peek_tail(&req->cmd_q); bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; spin_lock_irqsave(&hdev->cmd_q.lock, flags); skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); queue_work(hdev->workqueue, &hdev->cmd_work); return 0; } static void hci_request_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); req->hdev = hdev; req->err = 0; } /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, struct sock *sk) { struct hci_request req; struct sk_buff *skb; int err = 0; bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); hci_request_init(&req, hdev); hci_cmd_sync_add(&req, opcode, plen, param, event, sk); hdev->req_status = HCI_REQ_PEND; err = hci_req_sync_run(&req); if (err < 0) return ERR_PTR(err); err = wait_event_interruptible_timeout(hdev->req_wait_q, hdev->req_status != HCI_REQ_PEND, timeout); if (err == -ERESTARTSYS) return ERR_PTR(-EINTR); switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_to_errno(hdev->req_result); break; case HCI_REQ_CANCELED: err = -hdev->req_result; break; default: err = -ETIMEDOUT; break; } hdev->req_status = 0; hdev->req_result = 0; skb = hdev->req_rsp; hdev->req_rsp = NULL; bt_dev_dbg(hdev, "end: err %d", err); if (err < 0) { kfree_skb(skb); return ERR_PTR(err); } /* If command return a status event skb will be set to NULL as there are * no parameters. */ if (!skb) return ERR_PTR(-ENODATA); return skb; } EXPORT_SYMBOL(__hci_cmd_sync_sk); /* This function requires the caller holds hdev->req_lock. 
*/ struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); } EXPORT_SYMBOL(__hci_cmd_sync); /* Send HCI command and wait for command complete event */ struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { struct sk_buff *skb; if (!test_bit(HCI_UP, &hdev->flags)) return ERR_PTR(-ENETDOWN); bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); hci_req_sync_lock(hdev); skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); hci_req_sync_unlock(hdev); return skb; } EXPORT_SYMBOL(hci_cmd_sync); /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout) { return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, NULL); } EXPORT_SYMBOL(__hci_cmd_sync_ev); /* This function requires the caller holds hdev->req_lock. */ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, struct sock *sk) { struct sk_buff *skb; u8 status; skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); /* If command return a status event, skb will be set to -ENODATA */ if (skb == ERR_PTR(-ENODATA)) return 0; if (IS_ERR(skb)) { if (!event) bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode, PTR_ERR(skb)); return PTR_ERR(skb); } status = skb->data[0]; kfree_skb(skb); return status; } EXPORT_SYMBOL(__hci_cmd_sync_status_sk); int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout, NULL); } EXPORT_SYMBOL(__hci_cmd_sync_status); int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { int err; hci_req_sync_lock(hdev); err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout); hci_req_sync_unlock(hdev); return err; } EXPORT_SYMBOL(hci_cmd_sync_status); static void hci_cmd_sync_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); bt_dev_dbg(hdev, ""); /* Dequeue all entries and run them */ while (1) { struct hci_cmd_sync_work_entry *entry; mutex_lock(&hdev->cmd_sync_work_lock); entry = list_first_entry_or_null(&hdev->cmd_sync_work_list, struct hci_cmd_sync_work_entry, list); if (entry) list_del(&entry->list); mutex_unlock(&hdev->cmd_sync_work_lock); if (!entry) break; bt_dev_dbg(hdev, "entry %p", entry); if (entry->func) { int err; hci_req_sync_lock(hdev); err = entry->func(hdev, entry->data); if (entry->destroy) entry->destroy(hdev, entry->data, err); hci_req_sync_unlock(hdev); } kfree(entry); } } static void hci_cmd_sync_cancel_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work); cancel_delayed_work_sync(&hdev->cmd_timer); cancel_delayed_work_sync(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); wake_up_interruptible(&hdev->req_wait_q); } static int hci_scan_disable_sync(struct hci_dev *hdev); static int scan_disable_sync(struct hci_dev *hdev, void *data) { return hci_scan_disable_sync(hdev); } static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) { return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0); } static void le_scan_disable(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, 
le_scan_disable.work); int status; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) goto _return; status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL); if (status) { bt_dev_err(hdev, "failed to disable LE scan: %d", status); goto _return; } /* If we were running LE only scan, change discovery state. If * we were running both LE and BR/EDR inquiry simultaneously, * and BR/EDR inquiry is already finished, stop discovery, * otherwise BR/EDR inquiry will stop discovery when finished. * If we will resolve remote device name, do not change * discovery state. */ if (hdev->discovery.type == DISCOV_TYPE_LE) goto discov_stopped; if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) goto _return; if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { if (!test_bit(HCI_INQUIRY, &hdev->flags) && hdev->discovery.state != DISCOVERY_RESOLVING) goto discov_stopped; goto _return; } status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); if (status) { bt_dev_err(hdev, "inquiry failed: status %d", status); goto discov_stopped; } goto _return; discov_stopped: hci_discovery_set_state(hdev, DISCOVERY_STOPPED); _return: hci_dev_unlock(hdev); } static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, u8 filter_dup); static int reenable_adv_sync(struct hci_dev *hdev, void *data) { bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) return 0; if (hdev->cur_adv_instance) { return hci_schedule_adv_instance_sync(hdev, hdev->cur_adv_instance, true); } else { if (ext_adv_capable(hdev)) { hci_start_ext_adv_sync(hdev, 0x00); } else { hci_update_adv_data_sync(hdev, 0x00); hci_update_scan_rsp_data_sync(hdev, 0x00); hci_enable_advertising_sync(hdev); } } return 0; } static void reenable_adv(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, reenable_adv_work); int status; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); if (status) bt_dev_err(hdev, "failed to reenable ADV: %d", status); hci_dev_unlock(hdev); } static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { hdev->adv_instance_timeout = 0; cancel_delayed_work(&hdev->adv_instance_expire); } } /* For a single instance: * - force == true: The instance will be removed even when its remaining * lifetime is not zero. * - force == false: the instance will be deactivated but kept stored unless * the remaining lifetime is zero. * * For instance == 0x00: * - force == true: All instances will be removed regardless of their timeout * setting. * - force == false: Only instances that have a timeout will be removed. */ int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force) { struct adv_info *adv_instance, *n, *next_instance = NULL; int err; u8 rem_inst; /* Cancel any timeout concerning the removed instance(s). */ if (!instance || hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); /* Get the next instance to advertise BEFORE we remove * the current one. This can be the same instance again * if there is only one instance. 
*/ if (instance && hdev->cur_adv_instance == instance) next_instance = hci_get_next_instance(hdev, instance); if (instance == 0x00) { list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { if (!(force || adv_instance->timeout)) continue; rem_inst = adv_instance->instance; err = hci_remove_adv_instance(hdev, rem_inst); if (!err) mgmt_advertising_removed(sk, hdev, rem_inst); } } else { adv_instance = hci_find_adv_instance(hdev, instance); if (force || (adv_instance && adv_instance->timeout && !adv_instance->remaining_time)) { /* Don't advertise a removed instance. */ if (next_instance && next_instance->instance == instance) next_instance = NULL; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); } } if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) return 0; if (next_instance && !ext_adv_capable(hdev)) return hci_schedule_adv_instance_sync(hdev, next_instance->instance, false); return 0; } static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) { u8 instance = *(u8 *)data; kfree(data); hci_clear_adv_instance_sync(hdev, NULL, instance, false); if (list_empty(&hdev->adv_instances)) return hci_disable_advertising_sync(hdev); return 0; } static void adv_timeout_expire(struct work_struct *work) { u8 *inst_ptr; struct hci_dev *hdev = container_of(work, struct hci_dev, adv_instance_expire.work); bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); hdev->adv_instance_timeout = 0; if (hdev->cur_adv_instance == 0x00) goto unlock; inst_ptr = kmalloc(1, GFP_KERNEL); if (!inst_ptr) goto unlock; *inst_ptr = hdev->cur_adv_instance; hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); unlock: hci_dev_unlock(hdev); } static bool is_interleave_scanning(struct hci_dev *hdev) { return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; } static int hci_passive_scan_sync(struct hci_dev *hdev); static void interleave_scan_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, interleave_scan.work); unsigned long timeout; if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); } else { bt_dev_err(hdev, "unexpected error"); return; } hci_passive_scan_sync(hdev); hci_dev_lock(hdev); switch (hdev->interleave_scan_state) { case INTERLEAVE_SCAN_ALLOWLIST: bt_dev_dbg(hdev, "next state: allowlist"); hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; break; case INTERLEAVE_SCAN_NO_FILTER: bt_dev_dbg(hdev, "next state: no filter"); hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; break; case INTERLEAVE_SCAN_NONE: bt_dev_err(hdev, "unexpected error"); } hci_dev_unlock(hdev); /* Don't continue interleaving if it was canceled */ if (is_interleave_scanning(hdev)) queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, timeout); } void hci_cmd_sync_init(struct hci_dev *hdev) { INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_LIST_HEAD(&hdev->cmd_sync_work_list); mutex_init(&hdev->cmd_sync_work_lock); mutex_init(&hdev->unregister_lock); INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); INIT_WORK(&hdev->reenable_adv_work, reenable_adv); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); } static void 
_hci_cmd_sync_cancel_entry(struct hci_dev *hdev, struct hci_cmd_sync_work_entry *entry, int err) { if (entry->destroy) entry->destroy(hdev, entry->data, err); list_del(&entry->list); kfree(entry); } void hci_cmd_sync_clear(struct hci_dev *hdev) { struct hci_cmd_sync_work_entry *entry, *tmp; cancel_work_sync(&hdev->cmd_sync_work); cancel_work_sync(&hdev->reenable_adv_work); mutex_lock(&hdev->cmd_sync_work_lock); list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); mutex_unlock(&hdev->cmd_sync_work_lock); } void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = err; hdev->req_status = HCI_REQ_CANCELED; queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); } } EXPORT_SYMBOL(hci_cmd_sync_cancel); /* Cancel ongoing command request synchronously: * * - Set result and mark status to HCI_REQ_CANCELED * - Wakeup command sync thread */ void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { /* req_result is __u32 so error must be positive to be properly * propagated. */ hdev->req_result = err < 0 ? -err : err; hdev->req_status = HCI_REQ_CANCELED; wake_up_interruptible(&hdev->req_wait_q); } } EXPORT_SYMBOL(hci_cmd_sync_cancel_sync); /* Submit HCI command to be run in as cmd_sync_work: * * - hdev must _not_ be unregistered */ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; int err = 0; mutex_lock(&hdev->unregister_lock); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { err = -ENODEV; goto unlock; } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { err = -ENOMEM; goto unlock; } entry->func = func; entry->data = data; entry->destroy = destroy; mutex_lock(&hdev->cmd_sync_work_lock); list_add_tail(&entry->list, &hdev->cmd_sync_work_list); mutex_unlock(&hdev->cmd_sync_work_lock); queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); unlock: mutex_unlock(&hdev->unregister_lock); return err; } EXPORT_SYMBOL(hci_cmd_sync_submit); /* Queue HCI command: * * - hdev must be running */ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { /* Only queue command if hdev is running which means it had been opened * and is either on init phase or is already up. */ if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; return hci_cmd_sync_submit(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_queue); static struct hci_cmd_sync_work_entry * _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { if (func && entry->func != func) continue; if (data && entry->data != data) continue; if (destroy && entry->destroy != destroy) continue; return entry; } return NULL; } /* Queue HCI command entry once: * * - Lookup if an entry already exist and only if it doesn't creates a new entry * and queue it. 
*/ int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) return 0; return hci_cmd_sync_queue(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_queue_once); /* Run HCI command: * * - hdev must be running * - if on cmd_sync_work then run immediately otherwise queue */ int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { /* Only queue command if hdev is running which means it had been opened * and is either on init phase or is already up. */ if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; /* If on cmd_sync_work then run immediately otherwise queue */ if (current_work() == &hdev->cmd_sync_work) return func(hdev, data); return hci_cmd_sync_submit(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_run); /* Run HCI command entry once: * * - Lookup if an entry already exist and only if it doesn't creates a new entry * and run it. * - if on cmd_sync_work then run immediately otherwise queue */ int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) return 0; return hci_cmd_sync_run(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_run_once); /* Lookup HCI command entry: * * - Return first entry that matches by function callback or data or * destroy callback. */ struct hci_cmd_sync_work_entry * hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; mutex_lock(&hdev->cmd_sync_work_lock); entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); mutex_unlock(&hdev->cmd_sync_work_lock); return entry; } EXPORT_SYMBOL(hci_cmd_sync_lookup_entry); /* Cancel HCI command entry */ void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, struct hci_cmd_sync_work_entry *entry) { mutex_lock(&hdev->cmd_sync_work_lock); _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); mutex_unlock(&hdev->cmd_sync_work_lock); } EXPORT_SYMBOL(hci_cmd_sync_cancel_entry); /* Dequeue one HCI command entry: * * - Lookup and cancel first entry that matches. */ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); if (!entry) return false; hci_cmd_sync_cancel_entry(hdev, entry); return true; } EXPORT_SYMBOL(hci_cmd_sync_dequeue_once); /* Dequeue HCI command entry: * * - Lookup and cancel any entry that matches by function callback or data or * destroy callback. 
*/ bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; bool ret = false; mutex_lock(&hdev->cmd_sync_work_lock); while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy))) { _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); ret = true; } mutex_unlock(&hdev->cmd_sync_work_lock); return ret; } EXPORT_SYMBOL(hci_cmd_sync_dequeue); int hci_update_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return 0; if (!lmp_ext_inq_capable(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return 0; memset(&cp, 0, sizeof(cp)); eir_create(hdev, cp.data); if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) return 0; memcpy(hdev->eir, cp.data, sizeof(cp.data)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; u8 val = 0; list_for_each_entry(uuid, &hdev->uuids, list) val |= uuid->svc_hint; return val; } int hci_update_class_sync(struct hci_dev *hdev) { u8 cod[3]; bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return 0; cod[0] = hdev->minor_class; cod[1] = hdev->major_class; cod[2] = get_service_classes(hdev); if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) cod[1] |= 0x20; if (memcmp(cod, hdev->dev_class, 3) == 0) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod, HCI_CMD_TIMEOUT); } static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) { /* If there is no connection we are OK to advertise. */ if (hci_conn_num(hdev, LE_LINK) == 0) return true; /* Check le_states if there is any connection in peripheral role. */ if (hdev->conn_hash.le_num_peripheral > 0) { /* Peripheral connection state and non connectable mode * bit 20. */ if (!connectable && !(hdev->le_states[2] & 0x10)) return false; /* Peripheral connection state and connectable mode bit 38 * and scannable bit 21. */ if (connectable && (!(hdev->le_states[4] & 0x40) || !(hdev->le_states[2] & 0x20))) return false; } /* Check le_states if there is any connection in central role. */ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { /* Central connection state and non connectable mode bit 18. */ if (!connectable && !(hdev->le_states[2] & 0x02)) return false; /* Central connection state and connectable mode bit 35 and * scannable 19. */ if (connectable && (!(hdev->le_states[4] & 0x08) || !(hdev->le_states[2] & 0x08))) return false; } return true; } static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) { /* If privacy is not enabled don't use RPA */ if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return false; /* If basic privacy mode is enabled use RPA */ if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) return true; /* If limited privacy mode is enabled don't use RPA if we're * both discoverable and bondable. */ if ((flags & MGMT_ADV_FLAG_DISCOV) && hci_dev_test_flag(hdev, HCI_BONDABLE)) return false; /* We're neither bondable nor discoverable in the limited * privacy mode, therefore use RPA. 
*/ return true; } static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) { /* If a random_addr has been set we're advertising or initiating an LE * connection we can't go ahead and change the random address at this * time. This is because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). * * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ if (bacmp(&hdev->random_addr, BDADDR_ANY) && (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_lookup_le_connect(hdev))) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return 0; } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa, HCI_CMD_TIMEOUT); } int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, bool rpa, u8 *own_addr_type) { int err; /* If privacy is enabled use a resolvable private address. If * current RPA has expired or there is something else than * the current RPA in use, then generate a new one. */ if (rpa) { /* If Controller supports LL Privacy use own address type is * 0x03 */ if (ll_privacy_capable(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; /* Check if RPA is valid */ if (rpa_valid(hdev)) return 0; err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } err = hci_set_random_addr_sync(hdev, &hdev->rpa); if (err) return err; return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for active * scanning and non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; return hci_set_random_addr_sync(hdev, &nrpa); } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. */ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { *own_addr_type = ADDR_LE_DEV_RANDOM; if (bacmp(&hdev->static_addr, &hdev->random_addr)) return hci_set_random_addr_sync(hdev, &hdev->static_addr); return 0; } /* Neither privacy nor static address is being used so use a * public address. 
*/ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *set; u8 data[sizeof(*cp) + sizeof(*set) * 1]; u8 size; struct adv_info *adv = NULL; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; /* If not enabled there is nothing to do */ if (!adv->enabled) return 0; } memset(data, 0, sizeof(data)); cp = (void *)data; set = (void *)cp->data; /* Instance 0x00 indicates all advertising instances will be disabled */ cp->num_of_sets = !!instance; cp->enable = 0x00; set->handle = adv ? adv->handle : instance; size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, size, data, HCI_CMD_TIMEOUT); } static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, bdaddr_t *random_addr) { struct hci_cp_le_set_adv_set_rand_addr cp; int err; if (!instance) { /* Instance 0x00 doesn't have an adv_info, instead it uses * hdev->random_addr to track its address so whenever it needs * to be updated this also set the random address since * hdev->random_addr is shared with scan state machine. */ err = hci_set_random_addr_sync(hdev, random_addr); if (err) return err; } memset(&cp, 0, sizeof(cp)); cp.handle = instance; bacpy(&cp.bdaddr, random_addr); return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; bool connectable; u32 flags; bdaddr_t random_addr; u8 own_addr_type; int err; struct adv_info *adv; bool secondary_adv; if (instance > 0) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; } else { adv = NULL; } /* Updating parameters of an active instance will return a * Command Disallowed error, so we must first disable the * instance if it is active. */ if (adv && !adv->pending) { err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; } flags = hci_adv_instance_flags(hdev, instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return -EPERM; /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address. 
*/ err = hci_get_random_address(hdev, !connectable, adv_use_rpa(hdev, flags), adv, &own_addr_type, &random_addr); if (err < 0) return err; memset(&cp, 0, sizeof(cp)); if (adv) { hci_cpu_to_le24(adv->min_interval, cp.min_interval); hci_cpu_to_le24(adv->max_interval, cp.max_interval); cp.tx_power = adv->tx_power; cp.sid = adv->sid; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; cp.sid = 0x00; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); if (connectable) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); } else if (hci_adv_instance_is_scannable(hdev, instance) || (flags & MGMT_ADV_PARAM_SCAN_RSP)) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); } else { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); } /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter * contains the peer’s Identity Address and the Peer_Address_Type * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01). * These parameters are used to locate the corresponding local IRK in * the resolving list; this IRK is used to generate their own address * used in the advertisement. */ if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) hci_copy_identity_address(hdev, &cp.peer_addr, &cp.peer_addr_type); cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.handle = adv ? adv->handle : instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_2M; } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { cp.primary_phy = HCI_ADV_PHY_CODED; cp.secondary_phy = HCI_ADV_PHY_CODED; } else { /* In all other cases use 1M */ cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_1M; } err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; if ((own_addr_type == ADDR_LE_DEV_RANDOM || own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && bacmp(&random_addr, BDADDR_ANY)) { /* Check if random address need to be updated */ if (adv) { if (!bacmp(&random_addr, &adv->random_addr)) return 0; } else { if (!bacmp(&random_addr, &hdev->random_addr)) return 0; } return hci_set_adv_set_random_addr_sync(hdev, instance, &random_addr); } return 0; } static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length, HCI_MAX_EXT_AD_LENGTH); u8 len; struct adv_info *adv = NULL; int err; if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->scan_rsp_changed) return 0; } len = eir_create_scan_rsp(hdev, instance, pdu->data); pdu->handle = adv ? 
adv->handle : instance; pdu->length = len; pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, struct_size(pdu, data, len), pdu, HCI_CMD_TIMEOUT); if (err) return err; if (adv) { adv->scan_rsp_changed = false; } else { memcpy(hdev->scan_rsp_data, pdu->data, len); hdev->scan_rsp_data_len = len; } return 0; } static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_scan_rsp_data cp; u8 len; memset(&cp, 0, sizeof(cp)); len = eir_create_scan_rsp(hdev, instance, cp.data); if (hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) return 0; memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); hdev->scan_rsp_data_len = len; cp.length = len; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; if (ext_adv_capable(hdev)) return hci_set_ext_scan_rsp_data_sync(hdev, instance); return __hci_set_scan_rsp_data_sync(hdev, instance); } int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *set; u8 data[sizeof(*cp) + sizeof(*set) * 1]; struct adv_info *adv; if (instance > 0) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; /* If already enabled there is nothing to do */ if (adv->enabled) return 0; } else { adv = NULL; } cp = (void *)data; set = (void *)cp->data; memset(cp, 0, sizeof(*cp)); cp->enable = 0x01; cp->num_of_sets = 0x01; memset(set, 0, sizeof(*set)); set->handle = adv ? adv->handle : instance; /* Set duration per instance since controller is responsible for * scheduling it. */ if (adv && adv->timeout) { u16 duration = adv->timeout * MSEC_PER_SEC; /* Time = N * 10 ms */ set->duration = cpu_to_le16(duration / 10); } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(*cp) + sizeof(*set) * cp->num_of_sets, data, HCI_CMD_TIMEOUT); } int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) { int err; err = hci_setup_ext_adv_instance_sync(hdev, instance); if (err) return err; err = hci_set_ext_scan_rsp_data_sync(hdev, instance); if (err) return err; return hci_enable_ext_advertising_sync(hdev, instance); } int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_per_adv_enable cp; struct adv_info *adv = NULL; /* If periodic advertising already disabled there is nothing to do. 
*/ adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->periodic || !adv->enabled) return 0; memset(&cp, 0, sizeof(cp)); cp.enable = 0x00; cp.handle = instance; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance, u16 min_interval, u16 max_interval) { struct hci_cp_le_set_per_adv_params cp; memset(&cp, 0, sizeof(cp)); if (!min_interval) min_interval = DISCOV_LE_PER_ADV_INT_MIN; if (!max_interval) max_interval = DISCOV_LE_PER_ADV_INT_MAX; cp.handle = instance; cp.min_interval = cpu_to_le16(min_interval); cp.max_interval = cpu_to_le16(max_interval); cp.periodic_properties = 0x0000; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance) { DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length, HCI_MAX_PER_AD_LENGTH); u8 len; struct adv_info *adv = NULL; if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->periodic) return 0; } len = eir_create_per_adv_data(hdev, instance, pdu->data); pdu->length = len; pdu->handle = adv ? adv->handle : instance; pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA, struct_size(pdu, data, len), pdu, HCI_CMD_TIMEOUT); } static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_per_adv_enable cp; struct adv_info *adv = NULL; /* If periodic advertising already enabled there is nothing to do. */ adv = hci_find_adv_instance(hdev, instance); if (adv && adv->periodic && adv->enabled) return 0; memset(&cp, 0, sizeof(cp)); cp.enable = 0x01; cp.handle = instance; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Checks if periodic advertising data contains a Basic Announcement and if it * does generates a Broadcast ID and add Broadcast Announcement. */ static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) { u8 bid[3]; u8 ad[HCI_MAX_EXT_AD_LENGTH]; u8 len; /* Skip if NULL adv as instance 0x00 is used for general purpose * advertising so it cannot used for the likes of Broadcast Announcement * as it can be overwritten at any point. */ if (!adv) return 0; /* Check if PA data doesn't contains a Basic Audio Announcement then * there is nothing to do. */ if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, 0x1851, NULL)) return 0; /* Check if advertising data already has a Broadcast Announcement since * the process may want to control the Broadcast ID directly and in that * case the kernel shall no interfere. 
*/ if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, NULL)) return 0; /* Generate Broadcast ID */ get_random_bytes(bid, sizeof(bid)); len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); memcpy(ad + len, adv->adv_data, adv->adv_data_len); hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len, ad, 0, NULL); return hci_update_adv_data_sync(hdev, adv->instance); } int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid, u8 data_len, u8 *data, u32 flags, u16 min_interval, u16 max_interval, u16 sync_interval) { struct adv_info *adv = NULL; int err; bool added = false; hci_disable_per_advertising_sync(hdev, instance); if (instance) { adv = hci_find_adv_instance(hdev, instance); if (adv) { if (sid != HCI_SID_INVALID && adv->sid != sid) { /* If the SID don't match attempt to find by * SID. */ adv = hci_find_adv_sid(hdev, sid); if (!adv) { bt_dev_err(hdev, "Unable to find adv_info"); return -EINVAL; } } /* Turn it into periodic advertising */ adv->periodic = true; adv->per_adv_data_len = data_len; if (data) memcpy(adv->per_adv_data, data, data_len); adv->flags = flags; } else if (!adv) { /* Create an instance if that could not be found */ adv = hci_add_per_instance(hdev, instance, sid, flags, data_len, data, sync_interval, sync_interval); if (IS_ERR(adv)) return PTR_ERR(adv); adv->pending = false; added = true; } } /* Start advertising */ err = hci_start_ext_adv_sync(hdev, instance); if (err < 0) goto fail; err = hci_adv_bcast_annoucement(hdev, adv); if (err < 0) goto fail; err = hci_set_per_adv_params_sync(hdev, instance, min_interval, max_interval); if (err < 0) goto fail; err = hci_set_per_adv_data_sync(hdev, instance); if (err < 0) goto fail; err = hci_enable_per_advertising_sync(hdev, instance); if (err < 0) goto fail; return 0; fail: if (added) hci_remove_adv_instance(hdev, instance); return err; } static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) { int err; if (ext_adv_capable(hdev)) return hci_start_ext_adv_sync(hdev, instance); err = hci_update_adv_data_sync(hdev, instance); if (err) return err; err = hci_update_scan_rsp_data_sync(hdev, instance); if (err) return err; return hci_enable_advertising_sync(hdev); } int hci_enable_advertising_sync(struct hci_dev *hdev) { struct adv_info *adv_instance; struct hci_cp_le_set_adv_param cp; u8 own_addr_type, enable = 0x01; bool connectable; u16 adv_min_interval, adv_max_interval; u32 flags; u8 status; if (ext_adv_capable(hdev)) return hci_enable_ext_advertising_sync(hdev, hdev->cur_adv_instance); flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return -EINVAL; status = hci_disable_advertising_sync(hdev); if (status) return status; /* Clear the HCI_LE_ADV bit temporarily so that the * hci_update_random_address knows that it's safe to go ahead * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address. 
*/ status = hci_update_random_address_sync(hdev, !connectable, adv_use_rpa(hdev, flags), &own_addr_type); if (status) return status; memset(&cp, 0, sizeof(cp)); if (adv_instance) { adv_min_interval = adv_instance->min_interval; adv_max_interval = adv_instance->max_interval; } else { adv_min_interval = hdev->le_adv_min_interval; adv_max_interval = hdev->le_adv_max_interval; } if (connectable) { cp.type = LE_ADV_IND; } else { if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) cp.type = LE_ADV_SCAN_IND; else cp.type = LE_ADV_NONCONN_IND; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; } } cp.min_interval = cpu_to_le16(adv_min_interval); cp.max_interval = cpu_to_le16(adv_max_interval); cp.own_address_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (status) return status; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); } static int enable_advertising_sync(struct hci_dev *hdev, void *data) { return hci_enable_advertising_sync(hdev); } int hci_enable_advertising(struct hci_dev *hdev) { if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) return 0; return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); } int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, struct sock *sk) { int err; if (!ext_adv_capable(hdev)) return 0; err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0 && !hci_find_adv_instance(hdev, instance)) return -EINVAL; return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance, 0, HCI_CMD_TIMEOUT, sk); } int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) { struct hci_cp_le_term_big cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) { DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length, HCI_MAX_EXT_AD_LENGTH); u8 len; struct adv_info *adv = NULL; int err; if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->adv_data_changed) return 0; } len = eir_create_adv_data(hdev, instance, pdu->data, HCI_MAX_EXT_AD_LENGTH); pdu->length = len; pdu->handle = adv ? 
adv->handle : instance; pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, struct_size(pdu, data, len), pdu, HCI_CMD_TIMEOUT); if (err) return err; /* Update data if the command succeed */ if (adv) { adv->adv_data_changed = false; } else { memcpy(hdev->adv_data, pdu->data, len); hdev->adv_data_len = len; } return 0; } static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_adv_data cp; u8 len; memset(&cp, 0, sizeof(cp)); len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data)); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && memcmp(cp.data, hdev->adv_data, len) == 0) return 0; memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); hdev->adv_data_len = len; cp.length = len; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) { if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; if (ext_adv_capable(hdev)) return hci_set_ext_adv_data_sync(hdev, instance); return hci_set_adv_data_sync(hdev, instance); } int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, bool force) { struct adv_info *adv = NULL; u16 timeout; if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) return -EPERM; if (hdev->adv_instance_timeout) return -EBUSY; adv = hci_find_adv_instance(hdev, instance); if (!adv) return -ENOENT; /* A zero timeout means unlimited advertising. As long as there is * only one instance, duration should be ignored. We still set a timeout * in case further instances are being added later on. * * If the remaining lifetime of the instance is more than the duration * then the timeout corresponds to the duration, otherwise it will be * reduced to the remaining instance lifetime. */ if (adv->timeout == 0 || adv->duration <= adv->remaining_time) timeout = adv->duration; else timeout = adv->remaining_time; /* The remaining time is being reduced unless the instance is being * advertised without time limit. */ if (adv->timeout) adv->remaining_time = adv->remaining_time - timeout; /* Only use work for scheduling instances with legacy advertising */ if (!ext_adv_capable(hdev)) { hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, secs_to_jiffies(timeout)); } /* If we're just re-scheduling the same instance again then do not * execute any HCI commands. This happens when a single instance is * being advertised. */ if (!force && hdev->cur_adv_instance == instance && hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; hdev->cur_adv_instance = instance; return hci_start_adv_sync(hdev, instance); } static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) { int err; if (!ext_adv_capable(hdev)) return 0; /* Disable instance 0x00 to disable all instances */ err = hci_disable_ext_adv_instance_sync(hdev, 0x00); if (err) return err; return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); } static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) { struct adv_info *adv, *n; int err = 0; if (ext_adv_capable(hdev)) /* Remove all existing sets */ err = hci_clear_adv_sets_sync(hdev, sk); if (ext_adv_capable(hdev)) return err; /* This is safe as long as there is no command send while the lock is * held. 
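* (Issuing a synchronous HCI command while holding hdev->lock could deadlock, since command completion processing also takes the lock; the calls below only touch in-memory state.)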
*/ hci_dev_lock(hdev); /* Cleanup non-ext instances */ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { u8 instance = adv->instance; int err; if (!(force || adv->timeout)) continue; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); } hci_dev_unlock(hdev); return 0; } static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, struct sock *sk) { int err = 0; /* If we use extended advertising, instance has to be removed first. */ if (ext_adv_capable(hdev)) err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); if (ext_adv_capable(hdev)) return err; /* This is safe as long as there is no command send while the lock is * held. */ hci_dev_lock(hdev); err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); hci_dev_unlock(hdev); return err; } /* For a single instance: * - force == true: The instance will be removed even when its remaining * lifetime is not zero. * - force == false: the instance will be deactivated but kept stored unless * the remaining lifetime is zero. * * For instance == 0x00: * - force == true: All instances will be removed regardless of their timeout * setting. * - force == false: Only instances that have a timeout will be removed. */ int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force) { struct adv_info *next = NULL; int err; /* Cancel any timeout concerning the removed instance(s). */ if (!instance || hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); /* Get the next instance to advertise BEFORE we remove * the current one. This can be the same instance again * if there is only one instance. */ if (hdev->cur_adv_instance == instance) next = hci_get_next_instance(hdev, instance); if (!instance) { err = hci_clear_adv_sync(hdev, sk, force); if (err) return err; } else { struct adv_info *adv = hci_find_adv_instance(hdev, instance); if (force || (adv && adv->timeout && !adv->remaining_time)) { /* Don't advertise a removed instance. */ if (next && next->instance == instance) next = NULL; err = hci_remove_adv_sync(hdev, instance, sk); if (err) return err; } } if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) return 0; if (next && !ext_adv_capable(hdev)) hci_schedule_adv_instance_sync(hdev, next->instance, false); return 0; } int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) { struct hci_cp_read_rssi cp; cp.handle = handle; return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, sizeof(*cp), cp, HCI_CMD_TIMEOUT); } int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) { struct hci_cp_read_tx_power cp; cp.handle = handle; cp.type = type; return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_disable_advertising_sync(struct hci_dev *hdev) { u8 enable = 0x00; int err = 0; /* If controller is not advertising we are done. 
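* (HCI_LE_ADV mirrors the controller's advertising state, so this check simply avoids a redundant HCI round trip.)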
*/ if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; if (ext_adv_capable(hdev)) err = hci_disable_ext_adv_instance_sync(hdev, 0x00); if (ext_adv_capable(hdev)) return err; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); } static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, u8 filter_dup) { struct hci_cp_le_set_ext_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = val; if (hci_dev_test_flag(hdev, HCI_MESH)) cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; else cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, u8 filter_dup) { struct hci_cp_le_set_scan_enable cp; if (use_ext_scan(hdev)) return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); memset(&cp, 0, sizeof(cp)); cp.enable = val; if (val && hci_dev_test_flag(hdev, HCI_MESH)) cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; else cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) { if (!ll_privacy_capable(hdev)) return 0; /* If controller is not/already resolving we are done. */ if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, sizeof(val), &val, HCI_CMD_TIMEOUT); } static int hci_scan_disable_sync(struct hci_dev *hdev) { int err; /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return 0; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); if (err) { bt_dev_err(hdev, "Unable to disable scanning: %d", err); return err; } return err; } static bool scan_use_rpa(struct hci_dev *hdev) { return hci_dev_test_flag(hdev, HCI_PRIVACY); } static void hci_start_interleave_scan(struct hci_dev *hdev) { hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, 0); } static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); cancel_delayed_work_sync(&hdev->interleave_scan); hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; } /* Return true if interleave_scan wasn't started until exiting this function, * otherwise, return false */ static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev) { /* Do interleaved scan only if all of the following are true: * - There is at least one ADV monitor * - At least one pending LE connection or one device to be scanned for * - Monitor offloading is not supported * If so, we should alternate between allowlist scan and one without * any filters to save power. 
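* For example, with a software-filtered ADV monitor and a pending LE connection, the scanner alternates between an allowlist window (cheap, rarely wakes the host) and an unfiltered window (needed so monitored advertisers that are not on the allowlist are still seen).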
*/ bool use_interleaving = hci_is_adv_monitoring(hdev) && !(list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports)) && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE; bool is_interleaving = is_interleave_scanning(hdev); if (use_interleaving && !is_interleaving) { hci_start_interleave_scan(hdev); bt_dev_dbg(hdev, "starting interleave scan"); return true; } if (!use_interleaving && is_interleaving) cancel_interleave_scan(hdev); return false; } /* Removes connection to resolve list if needed.*/ static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_resolv_list cp; struct bdaddr_list_with_irk *entry; if (!ll_privacy_capable(hdev)) return 0; /* Check if the IRK has been programmed */ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr, bdaddr_type); if (!entry) return 0; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_del_accept_list_sync(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_accept_list cp; int err; /* Check if device is on accept list before removing it */ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type)) return 0; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); /* Ignore errors when removing from resolving list as that is likely * that the device was never added. */ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) { bt_dev_err(hdev, "Unable to remove from allow list: %d", err); return err; } bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr, cp.bdaddr_type); return 0; } struct conn_params { bdaddr_t addr; u8 addr_type; hci_conn_flags_t flags; u8 privacy_mode; }; /* Adds connection to resolve list if needed. * Setting params to NULL programs local hdev->irk */ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, struct conn_params *params) { struct hci_cp_le_add_to_resolv_list cp; struct smp_irk *irk; struct bdaddr_list_with_irk *entry; struct hci_conn_params *p; if (!ll_privacy_capable(hdev)) return 0; /* Attempt to program local identity address, type and irk if params is * NULL. */ if (!params) { if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return 0; hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type); memcpy(cp.peer_irk, hdev->irk, 16); goto done; } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) return 0; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); if (!irk) return 0; /* Check if the IK has _not_ been programmed yet. 
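* (i.e. whether an entry for this IRK already exists in hdev->le_resolv_list; if it does, there is nothing left to add.)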
*/ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, ¶ms->addr, params->addr_type); if (entry) return 0; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, ¶ms->addr); memcpy(cp.peer_irk, irk->val, 16); /* Default privacy mode is always Network */ params->privacy_mode = HCI_NETWORK_PRIVACY; rcu_read_lock(); p = hci_pend_le_action_lookup(&hdev->pend_le_conns, ¶ms->addr, params->addr_type); if (!p) p = hci_pend_le_action_lookup(&hdev->pend_le_reports, ¶ms->addr, params->addr_type); if (p) WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); rcu_read_unlock(); done: if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); else memset(cp.local_irk, 0, 16); return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Set Device Privacy Mode. */ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, struct conn_params *params) { struct hci_cp_le_set_privacy_mode cp; struct smp_irk *irk; if (!ll_privacy_capable(hdev) || !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) return 0; /* If device privacy mode has already been set there is nothing to do */ if (params->privacy_mode == HCI_DEVICE_PRIVACY) return 0; /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also * indicates that LL Privacy has been enabled and * HCI_OP_LE_SET_PRIVACY_MODE is supported. */ if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) return 0; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); if (!irk) return 0; memset(&cp, 0, sizeof(cp)); cp.bdaddr_type = irk->addr_type; bacpy(&cp.bdaddr, &irk->bdaddr); cp.mode = HCI_DEVICE_PRIVACY; /* Note: params->privacy_mode is not updated since it is a copy */ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Adds connection to allow list if needed, if the device uses RPA (has IRK) * this attempts to program the device in the resolving list as well and * properly set the privacy mode. */ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, struct conn_params *params, u8 *num_entries) { struct hci_cp_le_add_to_accept_list cp; int err; /* During suspend, only wakeable devices can be in acceptlist */ if (hdev->suspended && !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { hci_le_del_accept_list_sync(hdev, ¶ms->addr, params->addr_type); return 0; } /* Select filter policy to accept all advertising */ if (*num_entries >= hdev->le_accept_list_size) return -ENOSPC; /* Attempt to program the device in the resolving list first to avoid * having to rollback in case it fails since the resolving list is * dynamic it can probably be smaller than the accept list. 
*/ err = hci_le_add_resolve_list_sync(hdev, params); if (err) { bt_dev_err(hdev, "Unable to add to resolve list: %d", err); return err; } /* Set Privacy Mode */ err = hci_le_set_privacy_mode_sync(hdev, params); if (err) { bt_dev_err(hdev, "Unable to set privacy mode: %d", err); return err; } /* Check if already in accept list */ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr, params->addr_type)) return 0; *num_entries += 1; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, ¶ms->addr); err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) { bt_dev_err(hdev, "Unable to add to allow list: %d", err); /* Rollback the device from the resolving list */ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); return err; } bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr, cp.bdaddr_type); return 0; } /* This function disables/pause all advertising instances */ static int hci_pause_advertising_sync(struct hci_dev *hdev) { int err; int old_state; /* If already been paused there is nothing to do. */ if (hdev->advertising_paused) return 0; bt_dev_dbg(hdev, "Pausing directed advertising"); /* Stop directed advertising */ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); if (old_state) { /* When discoverable timeout triggers, then just make sure * the limited discoverable flag is cleared. Even in the case * of a timeout triggered from general discoverable, it is * safe to unconditionally clear the flag. */ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hdev->discov_timeout = 0; } bt_dev_dbg(hdev, "Pausing advertising instances"); /* Call to disable any advertisements active on the controller. * This will succeed even if no advertisements are configured. */ err = hci_disable_advertising_sync(hdev); if (err) return err; /* If we are using software rotation, pause the loop */ if (!ext_adv_capable(hdev)) cancel_adv_timeout(hdev); hdev->advertising_paused = true; hdev->advertising_old_state = old_state; return 0; } /* This function enables all user advertising instances */ static int hci_resume_advertising_sync(struct hci_dev *hdev) { struct adv_info *adv, *tmp; int err; /* If advertising has not been paused there is nothing to do. */ if (!hdev->advertising_paused) return 0; /* Resume directed advertising */ hdev->advertising_paused = false; if (hdev->advertising_old_state) { hci_dev_set_flag(hdev, HCI_ADVERTISING); hdev->advertising_old_state = 0; } bt_dev_dbg(hdev, "Resuming advertising instances"); if (ext_adv_capable(hdev)) { /* Call for each tracked instance to be re-enabled */ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { err = hci_enable_ext_advertising_sync(hdev, adv->instance); if (!err) continue; /* If the instance cannot be resumed remove it */ hci_remove_ext_adv_instance_sync(hdev, adv->instance, NULL); } } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop */ err = hci_schedule_adv_instance_sync(hdev, hdev->cur_adv_instance, true); } hdev->advertising_paused = false; return err; } static int hci_pause_addr_resolution(struct hci_dev *hdev) { int err; if (!ll_privacy_capable(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return 0; /* Cannot disable addr resolution if scanning is enabled or * when initiating an LE connection. 
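* (Controllers typically reject LE Set Address Resolution Enable with Command Disallowed while scanning, advertising or initiating is active, which is also why advertising is paused below.)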
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || hci_lookup_le_connect(hdev)) { bt_dev_err(hdev, "Command not allowed when scan/LE connect"); return -EPERM; } /* Cannot disable addr resolution if advertising is enabled. */ err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "Pause advertising failed: %d", err); return err; } err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); if (err) bt_dev_err(hdev, "Unable to disable Address Resolution: %d", err); /* Return if address resolution is disabled and RPA is not used. */ if (!err && scan_use_rpa(hdev)) return 0; hci_resume_advertising_sync(hdev); return err; } struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool extended, struct sock *sk) { u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA : HCI_OP_READ_LOCAL_OOB_DATA; return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); } static struct conn_params *conn_params_copy(struct list_head *list, size_t *n) { struct hci_conn_params *params; struct conn_params *p; size_t i; rcu_read_lock(); i = 0; list_for_each_entry_rcu(params, list, action) ++i; *n = i; rcu_read_unlock(); p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL); if (!p) return NULL; rcu_read_lock(); i = 0; list_for_each_entry_rcu(params, list, action) { /* Racing adds are handled in next scan update */ if (i >= *n) break; /* No hdev->lock, but: addr, addr_type are immutable. * privacy_mode is only written by us or in * hci_cc_le_set_privacy_mode that we wait for. * We should be idempotent so MGMT updating flags * while we are processing is OK. */ bacpy(&p[i].addr, ¶ms->addr); p[i].addr_type = params->addr_type; p[i].flags = READ_ONCE(params->flags); p[i].privacy_mode = READ_ONCE(params->privacy_mode); ++i; } rcu_read_unlock(); *n = i; return p; } /* Clear LE Accept List */ static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) { if (!(hdev->commands[26] & 0x80)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, HCI_CMD_TIMEOUT); } /* Device must not be scanning when updating the accept list. * * Update is done using the following sequence: * * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) -> * Remove Devices From Accept List -> * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))-> * Add Devices to Accept List -> * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) -> * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * In case of failure advertising shall be restored to its original state and * return would disable accept list since either accept or resolving list could * not be programmed. * */ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) { struct conn_params *params; struct bdaddr_list *b, *t; u8 num_entries = 0; bool pend_conn, pend_report; u8 filter_policy; size_t i, n; int err; /* Pause advertising if resolving list can be used as controllers * cannot accept resolving list modifications while advertising. */ if (ll_privacy_capable(hdev)) { err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "pause advertising failed: %d", err); return 0x00; } } /* Disable address resolution while reprogramming accept list since * devices that do have an IRK will be programmed in the resolving list * when LL Privacy is enabled. 
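* (The resolving list generally cannot be modified while address resolution is in use on an active controller, hence the disable here and the re-enable once the update is done.)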
*/ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); if (err) { bt_dev_err(hdev, "Unable to disable LL privacy: %d", err); goto done; } /* Force address filtering if PA Sync is in progress */ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { struct hci_conn *conn; conn = hci_conn_hash_lookup_create_pa_sync(hdev); if (conn) { struct conn_params pa; memset(&pa, 0, sizeof(pa)); bacpy(&pa.addr, &conn->dst); pa.addr_type = conn->dst_type; /* Clear first since there could be addresses left * behind. */ hci_le_clear_accept_list_sync(hdev); num_entries = 1; err = hci_le_add_accept_list_sync(hdev, &pa, &num_entries); goto done; } } /* Go through the current accept list programmed into the * controller one by one and check if that address is connected or is * still in the list of pending connections or list of devices to * report. If not present in either list, then remove it from * the controller. */ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) continue; /* Pointers not dereferenced, no locks needed */ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, &b->bdaddr, b->bdaddr_type); pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, &b->bdaddr, b->bdaddr_type); /* If the device is not likely to connect or report, * remove it from the acceptlist. */ if (!pend_conn && !pend_report) { hci_le_del_accept_list_sync(hdev, &b->bdaddr, b->bdaddr_type); continue; } num_entries++; } /* Since all no longer valid accept list entries have been * removed, walk through the list of pending connections * and ensure that any new device gets programmed into * the controller. * * If the list of the devices is larger than the list of * available accept list entries in the controller, then * just abort and return filer policy value to not use the * accept list. * * The list and params may be mutated while we wait for events, * so make a copy and iterate it. */ params = conn_params_copy(&hdev->pend_le_conns, &n); if (!params) { err = -ENOMEM; goto done; } for (i = 0; i < n; ++i) { err = hci_le_add_accept_list_sync(hdev, ¶ms[i], &num_entries); if (err) { kvfree(params); goto done; } } kvfree(params); /* After adding all new pending connections, walk through * the list of pending reports and also add these to the * accept list if there is still space. Abort if space runs out. */ params = conn_params_copy(&hdev->pend_le_reports, &n); if (!params) { err = -ENOMEM; goto done; } for (i = 0; i < n; ++i) { err = hci_le_add_accept_list_sync(hdev, ¶ms[i], &num_entries); if (err) { kvfree(params); goto done; } } kvfree(params); /* Use the allowlist unless the following conditions are all true: * - We are not currently suspending * - There are 1 or more ADV monitors registered and it's not offloaded * - Interleaved scanning is not currently using the allowlist */ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) err = -EINVAL; done: filter_policy = err ? 0x00 : 0x01; /* Enable address resolution when LL Privacy is enabled. 
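* (Both the success and the error paths reach the done label above, so resolution is re-enabled unconditionally and the controller keeps resolving RPAs for already-programmed IRKs even if the accept list update failed.)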
*/ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); if (err) bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); /* Resume advertising if it was paused */ if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* Select filter policy to use accept list */ return filter_policy; } static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp, u8 type, u16 interval, u16 window) { cp->type = type; cp->interval = cpu_to_le16(interval); cp->window = cpu_to_le16(window); } static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_ext_scan_params *cp; struct hci_cp_le_scan_phy_params *phy; u8 data[sizeof(*cp) + sizeof(*phy) * 2]; u8 num_phy = 0x00; cp = (void *)data; phy = (void *)cp->data; memset(data, 0, sizeof(data)); cp->own_addr_type = own_addr_type; cp->filter_policy = filter_policy; /* Check if PA Sync is in progress then select the PHY based on the * hci_conn.iso_qos. */ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { struct hci_cp_le_add_to_accept_list *sent; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); if (sent) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, BIS_LINK, &sent->bdaddr); if (conn) { struct bt_iso_qos *qos = &conn->iso_qos; if (qos->bcast.in.phy & BT_ISO_PHY_1M || qos->bcast.in.phy & BT_ISO_PHY_2M) { cp->scanning_phys |= LE_SCAN_PHY_1M; hci_le_scan_phy_params(phy, type, interval, window); num_phy++; phy++; } if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { cp->scanning_phys |= LE_SCAN_PHY_CODED; hci_le_scan_phy_params(phy, type, interval * 3, window * 3); num_phy++; phy++; } if (num_phy) goto done; } } } if (scan_1m(hdev) || scan_2m(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_1M; hci_le_scan_phy_params(phy, type, interval, window); num_phy++; phy++; } if (scan_coded(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_CODED; hci_le_scan_phy_params(phy, type, interval * 3, window * 3); num_phy++; phy++; } done: if (!num_phy) return -EINVAL; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, sizeof(*cp) + sizeof(*phy) * num_phy, data, HCI_CMD_TIMEOUT); } static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_scan_param cp; if (use_ext_scan(hdev)) return hci_le_set_ext_scan_param_sync(hdev, type, interval, window, own_addr_type, filter_policy); memset(&cp, 0, sizeof(cp)); cp.type = type; cp.interval = cpu_to_le16(interval); cp.window = cpu_to_le16(window); cp.own_address_type = own_addr_type; cp.filter_policy = filter_policy; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy, u8 filter_dup) { int err; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_le_set_scan_param_sync(hdev, type, interval, window, own_addr_type, filter_policy); if (err) return err; return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); } static int hci_passive_scan_sync(struct hci_dev *hdev) { u8 own_addr_type; u8 filter_policy; u16 window, interval; u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; int err; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_scan_disable_sync(hdev); if (err) { bt_dev_err(hdev, "disable scanning failed: %d", err); return err; } /* Set 
require_privacy to false since no SCAN_REQ are sent * during passive scanning. Not using a non-resolvable address * here is important so that peer devices using direct * advertising with our address will be correctly reported * by the controller. */ if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev), &own_addr_type)) return 0; if (hdev->enable_advmon_interleave_scan && hci_update_interleaved_scan_sync(hdev)) return 0; bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); /* Adding or removing entries from the accept list must * happen before enabling scanning. The controller does * not allow accept list modification while scanning. */ filter_policy = hci_update_accept_list_sync(hdev); /* If suspended and filter_policy set to 0x00 (no acceptlist) then * passive scanning cannot be started since that would require the host * to be woken up to process the reports. */ if (hdev->suspended && !filter_policy) { /* If the accept list is empty then there is no need to scan * while suspended. */ if (list_empty(&hdev->le_accept_list)) return 0; /* If there are devices in the accept_list then some devices could * not be programmed, which in the non-suspended case means * filter_policy needs to be set to 0x00 so the host does the * filtering. Since this is the suspended case, devices that need * host filtering can be ignored so that devices in the acceptlist * are still able to wake up the system. */ filter_policy = 0x01; } /* When the controller is using random resolvable addresses and * with that having LE privacy enabled, then controllers with * Extended Scanner Filter Policies support can now enable support * for handling directed advertising. * * So instead of using filter policies 0x00 (no acceptlist) * and 0x01 (acceptlist enabled) use the new filter policies * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). */ if (hci_dev_test_flag(hdev, HCI_PRIVACY) && (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; /* Disable duplicates filter when scanning for advertisement * monitor for the following reasons. * * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm * controllers ignore RSSI_Sampling_Period when the duplicates * filter is enabled. * * For SW pattern filtering, when we're not doing interleaved * scanning, it is necessary to disable duplicates filter, * otherwise hosts can only receive one advertisement and it's * impossible to know if a peer is still in range. */ filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; } /* Disable all filtering for Mesh */ if (hci_dev_test_flag(hdev, HCI_MESH)) { filter_policy = 0; filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, own_addr_type, filter_policy, filter_dups); } /* This function controls the passive scanning based on hdev->pend_le_conns * list.
If there are pending LE connections we start the background scanning, * otherwise we stop it in the following sequence: * * If there are devices to scan: * * Disable Scanning -> Update Accept List -> * ll_privacy_capable((Disable Advertising) -> Disable Resolving List -> * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * Otherwise: * * Disable Scanning */ int hci_update_passive_scan_sync(struct hci_dev *hdev) { int err; if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* No point in doing scanning if LE support hasn't been enabled */ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; /* If discovery is active don't interfere with it */ if (hdev->discovery.state != DISCOVERY_STOPPED) return 0; /* Reset RSSI and UUID filters when starting background scanning * since these filters are meant for service discovery only. * * The Start Discovery and Start Service Discovery operations * ensure that proper values are set for the RSSI threshold and UUID * filter list. So it is safe to just reset them here. */ hci_discovery_filter_clear(hdev); bt_dev_dbg(hdev, "ADV monitoring is %s", hci_is_adv_monitoring(hdev) ? "on" : "off"); if (!hci_dev_test_flag(hdev, HCI_MESH) && list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && !hci_is_adv_monitoring(hdev) && !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { /* If there are no pending LE connections, no devices to be * scanned for and no ADV monitors, we should stop the * background scanning. */ bt_dev_dbg(hdev, "stopping background scanning"); err = hci_scan_disable_sync(hdev); if (err) bt_dev_err(hdev, "stop background scanning failed: %d", err); } else { /* If there is at least one pending LE connection, we should * keep the background scan running. */ /* If controller is connecting, we should not start scanning * since some controllers are not able to scan and connect at * the same time.
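* (hci_lookup_le_connect() reports an LE connection attempt that is still in progress; passive scanning is re-evaluated once that attempt completes or times out.)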
*/ if (hci_lookup_le_connect(hdev)) return 0; bt_dev_dbg(hdev, "start background scanning"); err = hci_passive_scan_sync(hdev); if (err) bt_dev_err(hdev, "start background scanning failed: %d", err); } return err; } static int update_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_scan_sync(hdev); } int hci_update_scan(struct hci_dev *hdev) { return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); } static int update_passive_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); } int hci_update_passive_scan(struct hci_dev *hdev) { /* Only queue if it would have any effect */ if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, NULL); } int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) { int err; if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) return 0; err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, sizeof(val), &val, HCI_CMD_TIMEOUT); if (!err) { if (val) { hdev->features[1][0] |= LMP_HOST_SC; hci_dev_set_flag(hdev, HCI_SC_ENABLED); } else { hdev->features[1][0] &= ~LMP_HOST_SC; hci_dev_clear_flag(hdev, HCI_SC_ENABLED); } } return err; } int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) { int err; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || lmp_host_ssp_capable(hdev)) return 0; if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); if (err) return err; return hci_write_sc_support_sync(hdev, 0x01); } int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) { struct hci_cp_write_le_host_supported cp; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || !lmp_bredr_capable(hdev)) return 0; /* Check first if we already have the right host state * (host features set) */ if (le == lmp_host_le_capable(hdev) && simul == lmp_host_le_br_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); cp.le = le; cp.simul = simul; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_powered_update_adv_sync(struct hci_dev *hdev) { struct adv_info *adv, *tmp; int err; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; /* If RPA Resolution has not been enable yet it means the * resolving list is empty and we should attempt to program the * local IRK in order to support using own_addr_type * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). */ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { hci_le_add_resolve_list_sync(hdev, NULL); hci_le_set_addr_resolution_enable_sync(hdev, 0x01); } /* Make sure the controller has a good default for * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. 
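* (Instance 0x00 is the default data set derived from the current hdev settings rather than from an MGMT-provided advertising instance.)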
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) { err = hci_setup_ext_adv_instance_sync(hdev, 0x00); if (!err) hci_update_scan_rsp_data_sync(hdev, 0x00); } else { err = hci_update_adv_data_sync(hdev, 0x00); if (!err) hci_update_scan_rsp_data_sync(hdev, 0x00); } if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) hci_enable_advertising_sync(hdev); } /* Call for each tracked instance to be scheduled */ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) hci_schedule_adv_instance_sync(hdev, adv->instance, true); return 0; } static int hci_write_auth_enable_sync(struct hci_dev *hdev) { u8 link_sec; link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(link_sec), &link_sec, HCI_CMD_TIMEOUT); } int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) { struct hci_cp_write_page_scan_activity cp; u8 type; int err = 0; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; memset(&cp, 0, sizeof(cp)); if (enable) { type = PAGE_SCAN_TYPE_INTERLACED; /* 160 msec page scan interval */ cp.interval = cpu_to_le16(0x0100); } else { type = hdev->def_page_scan_type; cp.interval = cpu_to_le16(hdev->def_page_scan_int); } cp.window = cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || __cpu_to_le16(hdev->page_scan_window) != cp.window) { err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; } if (hdev->page_scan_type != type) err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, sizeof(type), &type, HCI_CMD_TIMEOUT); return err; } static bool disconnected_accept_list_entries(struct hci_dev *hdev) { struct bdaddr_list *b; list_for_each_entry(b, &hdev->accept_list, list) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); if (!conn) return true; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) return true; } return false; } static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) { return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(val), &val, HCI_CMD_TIMEOUT); } int hci_update_scan_sync(struct hci_dev *hdev) { u8 scan; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (!hdev_is_powered(hdev)) return 0; if (mgmt_powering_down(hdev)) return 0; if (hdev->scanning_paused) return 0; if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_accept_list_entries(hdev)) scan = SCAN_PAGE; else scan = SCAN_DISABLED; if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) scan |= SCAN_INQUIRY; if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) return 0; return hci_write_scan_enable_sync(hdev, scan); } int hci_update_name_sync(struct hci_dev *hdev) { struct hci_cp_write_local_name cp; memset(&cp, 0, sizeof(cp)); memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* This function perform powered update HCI command sequence after the HCI init * sequence which end up resetting all states, the sequence is as follows: * * HCI_SSP_ENABLED(Enable SSP) * HCI_LE_ENABLED(Enable LE) * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) -> * Update adv data) * Enable Authentication * 
lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> * Set Name -> Set EIR) * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address) */ int hci_powered_update_sync(struct hci_dev *hdev) { int err; /* Register the available SMP channels (BR/EDR and LE) only when * successfully powering on the controller. This late * registration is required so that LE SMP can clearly decide if * the public address or static address is used. */ smp_register(hdev); err = hci_write_ssp_mode_sync(hdev, 0x01); if (err) return err; err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); if (err) return err; err = hci_powered_update_adv_sync(hdev); if (err) return err; err = hci_write_auth_enable_sync(hdev); if (err) return err; if (lmp_bredr_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) hci_write_fast_connectable_sync(hdev, true); else hci_write_fast_connectable_sync(hdev, false); hci_update_scan_sync(hdev); hci_update_class_sync(hdev); hci_update_name_sync(hdev); hci_update_eir_sync(hdev); } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. */ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { if (bacmp(&hdev->static_addr, BDADDR_ANY)) return hci_set_random_addr_sync(hdev, &hdev->static_addr); } return 0; } /** * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address * (BD_ADDR) for a HCI device from * a firmware node property. * @hdev: The HCI device * * Search the firmware node for 'local-bd-address'. * * All-zero BD addresses are rejected, because those could be properties * that exist in the firmware tables, but were not updated by the firmware. For * example, the DTS could define 'local-bd-address', with zero BD addresses. 
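* The property is expected to hold the address in little-endian byte order; HCI_QUIRK_BDADDR_PROPERTY_BROKEN marks firmware that stores it the other way round, which is why the value is byte-swapped below for those devices.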
*/ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) { struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); bdaddr_t ba; int ret; ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", (u8 *)&ba, sizeof(ba)); if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) return; if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks)) baswap(&hdev->public_addr, &ba); else bacpy(&hdev->public_addr, &ba); } struct hci_init_stage { int (*func)(struct hci_dev *hdev); }; /* Run init stage NULL terminated function table */ static int hci_init_stage_sync(struct hci_dev *hdev, const struct hci_init_stage *stage) { size_t i; for (i = 0; stage[i].func; i++) { int err; err = stage[i].func(hdev); if (err) return err; } return 0; } /* Read Local Version */ static int hci_read_local_version_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_CMD_TIMEOUT); } /* Read BD Address */ static int hci_read_bd_addr_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_CMD_TIMEOUT); } #define HCI_INIT(_func) \ { \ .func = _func, \ } static const struct hci_init_stage hci_init0[] = { /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_BD_ADDR */ HCI_INIT(hci_read_bd_addr_sync), {} }; int hci_reset_sync(struct hci_dev *hdev) { int err; set_bit(HCI_RESET, &hdev->flags); err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); if (err) return err; return 0; } static int hci_init0_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); /* Reset */ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { err = hci_reset_sync(hdev); if (err) return err; } return hci_init_stage_sync(hdev, hci_init0); } static int hci_unconf_init_sync(struct hci_dev *hdev) { int err; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return 0; err = hci_init0_sync(hdev); if (err < 0) return err; if (hci_dev_test_flag(hdev, HCI_SETUP)) hci_debugfs_create_basic(hdev); return 0; } /* Read Local Supported Features. */ static int hci_read_local_features_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } /* BR Controller init stage 1 command sequence */ static const struct hci_init_stage br_init1[] = { /* HCI_OP_READ_LOCAL_FEATURES */ HCI_INIT(hci_read_local_features_sync), /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_BD_ADDR */ HCI_INIT(hci_read_bd_addr_sync), {} }; /* Read Local Commands */ static int hci_read_local_cmds_sync(struct hci_dev *hdev) { /* All Bluetooth 1.2 and later controllers should support the * HCI command for reading the local supported commands. * * Unfortunately some controllers indicate Bluetooth 1.2 support, * but do not have support for this command. If that is the case, * the driver can quirk the behavior and skip reading the local * supported commands. */ if (hdev->hci_ver > BLUETOOTH_VER_1_1 && !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL, HCI_CMD_TIMEOUT); return 0; } static int hci_init1_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); /* Reset */ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { err = hci_reset_sync(hdev); if (err) return err; } return hci_init_stage_sync(hdev, br_init1); } /* Read Buffer Size (ACL mtu, max pkt, etc.) 
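* The reply populates hdev->acl_mtu/acl_pkts and the SCO equivalents, which the core later uses to pace outgoing ACL and SCO traffic.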
*/ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Class of Device */ static int hci_read_dev_class_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Local Name */ static int hci_read_local_name_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { if (!read_voice_setting_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Number of Supported IAC */ static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Current IAC LAP */ static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, u8 cond_type, bdaddr_t *bdaddr, u8 auto_accept) { struct hci_cp_set_event_filter cp; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); cp.flt_type = flt_type; if (flt_type != HCI_FLT_CLEAR_ALL) { cp.cond_type = cond_type; bacpy(&cp.addr_conn_flt.bdaddr, bdaddr); cp.addr_conn_flt.auto_accept = auto_accept; } return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, flt_type == HCI_FLT_CLEAR_ALL ? sizeof(cp.flt_type) : sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_clear_event_filter_sync(struct hci_dev *hdev) { if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) return 0; /* In theory the state machine should not reach here unless * a hci_set_event_filter_sync() call succeeds, but we do * the check both for parity and as a future reminder. 
*/ if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00, BDADDR_ANY, 0x00); } /* Connection accept timeout ~20 secs */ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) { __le16 param = cpu_to_le16(0x7d00); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT, sizeof(param), ¶m, HCI_CMD_TIMEOUT); } /* Enable SCO flow control if supported */ static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) { struct hci_cp_write_sync_flowctl cp; int err; /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); cp.enable = 0x01; err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (!err) hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); return err; } /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ HCI_INIT(hci_read_buffer_size_sync), /* HCI_OP_READ_CLASS_OF_DEV */ HCI_INIT(hci_read_dev_class_sync), /* HCI_OP_READ_LOCAL_NAME */ HCI_INIT(hci_read_local_name_sync), /* HCI_OP_READ_VOICE_SETTING */ HCI_INIT(hci_read_voice_setting_sync), /* HCI_OP_READ_NUM_SUPPORTED_IAC */ HCI_INIT(hci_read_num_supported_iac_sync), /* HCI_OP_READ_CURRENT_IAC_LAP */ HCI_INIT(hci_read_current_iac_lap_sync), /* HCI_OP_SET_EVENT_FLT */ HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), /* HCI_OP_WRITE_SYNC_FLOWCTL */ HCI_INIT(hci_write_sync_flowctl_sync), {} }; static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev) { u8 mode = 0x01; if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; /* When SSP is available, then the host features page * should also be available as well. However some * controllers list the max_page as 0 as long as SSP * has not been enabled. To achieve proper debugging * output, force the minimum max_page to 1 at least. */ hdev->max_page = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } static int hci_write_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; memset(hdev->eir, 0, sizeof(hdev->eir)); memset(&cp, 0, sizeof(cp)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) { u8 mode; if (!lmp_inq_rssi_capable(hdev) && !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) return 0; /* If Extended Inquiry Result events are supported, then * they are clearly preferred over Inquiry Result with RSSI * events. */ mode = lmp_ext_inq_capable(hdev) ? 
0x02 : 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) { if (!lmp_inq_tx_pwr_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) { struct hci_cp_read_local_ext_features cp; if (!lmp_ext_feat_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); cp.page = page; return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) { return hci_read_local_ext_features_sync(hdev, 0x01); } /* HCI Controller init stage 2 command sequence */ static const struct hci_init_stage hci_init2[] = { /* HCI_OP_READ_LOCAL_COMMANDS */ HCI_INIT(hci_read_local_cmds_sync), /* HCI_OP_WRITE_SSP_MODE */ HCI_INIT(hci_write_ssp_mode_1_sync), /* HCI_OP_WRITE_EIR */ HCI_INIT(hci_write_eir_sync), /* HCI_OP_WRITE_INQUIRY_MODE */ HCI_INIT(hci_write_inquiry_mode_sync), /* HCI_OP_READ_INQ_RSP_TX_POWER */ HCI_INIT(hci_read_inq_rsp_tx_power_sync), /* HCI_OP_READ_LOCAL_EXT_FEATURES */ HCI_INIT(hci_read_local_ext_features_1_sync), /* HCI_OP_WRITE_AUTH_ENABLE */ HCI_INIT(hci_write_auth_enable_sync), {} }; /* Read LE Buffer Size */ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) { /* Use Read LE Buffer Size V2 if supported */ if (iso_capable(hdev) && hdev->commands[41] & 0x20) return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE_V2, 0, NULL, HCI_CMD_TIMEOUT); return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Local Supported Features */ static int hci_le_read_local_features_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Supported States */ static int hci_le_read_supported_states_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL, HCI_CMD_TIMEOUT); } /* LE Controller init stage 2 command sequence */ static const struct hci_init_stage le_init2[] = { /* HCI_OP_LE_READ_LOCAL_FEATURES */ HCI_INIT(hci_le_read_local_features_sync), /* HCI_OP_LE_READ_BUFFER_SIZE */ HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_SUPPORTED_STATES */ HCI_INIT(hci_le_read_supported_states_sync), {} }; static int hci_init2_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_init_stage_sync(hdev, hci_init2); if (err) return err; if (lmp_bredr_capable(hdev)) { err = hci_init_stage_sync(hdev, br_init2); if (err) return err; } else { hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); } if (lmp_le_capable(hdev)) { err = hci_init_stage_sync(hdev, le_init2); if (err) return err; /* LE-only controllers have LE implicitly enabled */ if (!lmp_bredr_capable(hdev)) hci_dev_set_flag(hdev, HCI_LE_ENABLED); } return 0; } static int hci_set_event_mask_sync(struct hci_dev *hdev) { /* The second byte is 0xff instead of 0x9f (two reserved bits * disabled) since a Broadcom 1.2 dongle doesn't respond to the * command otherwise. */ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; /* CSR 1.1 dongles does not accept any bitfield so don't try to set * any event mask for pre 1.2 devices. 
*/ if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; if (lmp_bredr_capable(hdev)) { events[4] |= 0x01; /* Flow Specification Complete */ /* Don't set Disconnect Complete and mode change when * suspended as that would wakeup the host when disconnecting * due to suspend. */ if (hdev->suspended) { events[0] &= 0xef; events[2] &= 0xf7; } } else { /* Use a different default for LE-only devices */ memset(events, 0, sizeof(events)); events[1] |= 0x20; /* Command Complete */ events[1] |= 0x40; /* Command Status */ events[1] |= 0x80; /* Hardware Error */ /* If the controller supports the Disconnect command, enable * the corresponding event. In addition enable packet flow * control related events. */ if (hdev->commands[0] & 0x20) { /* Don't set Disconnect Complete when suspended as that * would wakeup the host when disconnecting due to * suspend. */ if (!hdev->suspended) events[0] |= 0x10; /* Disconnection Complete */ events[2] |= 0x04; /* Number of Completed Packets */ events[3] |= 0x02; /* Data Buffer Overflow */ } /* If the controller supports the Read Remote Version * Information command, enable the corresponding event. */ if (hdev->commands[2] & 0x80) events[1] |= 0x08; /* Read Remote Version Information * Complete */ if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { events[0] |= 0x80; /* Encryption Change */ events[5] |= 0x80; /* Encryption Key Refresh Complete */ } } if (lmp_inq_rssi_capable(hdev) || test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) events[4] |= 0x02; /* Inquiry Result with RSSI */ if (lmp_ext_feat_capable(hdev)) events[4] |= 0x04; /* Read Remote Extended Features Complete */ if (lmp_esco_capable(hdev)) { events[5] |= 0x08; /* Synchronous Connection Complete */ events[5] |= 0x10; /* Synchronous Connection Changed */ } if (lmp_sniffsubr_capable(hdev)) events[5] |= 0x20; /* Sniff Subrating */ if (lmp_pause_enc_capable(hdev)) events[5] |= 0x80; /* Encryption Key Refresh Complete */ if (lmp_ext_inq_capable(hdev)) events[5] |= 0x40; /* Extended Inquiry Result */ if (lmp_no_flush_capable(hdev)) events[7] |= 0x01; /* Enhanced Flush Complete */ if (lmp_lsto_capable(hdev)) events[6] |= 0x80; /* Link Supervision Timeout Changed */ if (lmp_ssp_capable(hdev)) { events[6] |= 0x01; /* IO Capability Request */ events[6] |= 0x02; /* IO Capability Response */ events[6] |= 0x04; /* User Confirmation Request */ events[6] |= 0x08; /* User Passkey Request */ events[6] |= 0x10; /* Remote OOB Data Request */ events[6] |= 0x20; /* Simple Pairing Complete */ events[7] |= 0x04; /* User Passkey Notification */ events[7] |= 0x08; /* Keypress Notification */ events[7] |= 0x10; /* Remote Host Supported * Features Notification */ } if (lmp_le_capable(hdev)) events[7] |= 0x20; /* LE Meta-Event */ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } static int hci_read_stored_link_key_sync(struct hci_dev *hdev) { struct hci_cp_read_stored_link_key cp; if (!(hdev->commands[6] & 0x20) || test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, BDADDR_ANY); cp.read_all = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_setup_link_policy_sync(struct hci_dev *hdev) { struct hci_cp_write_def_link_policy cp; u16 link_policy = 0; if (!(hdev->commands[5] & 0x10)) return 0; memset(&cp, 0, sizeof(cp)); if (lmp_rswitch_capable(hdev)) link_policy |= HCI_LP_RSWITCH; if (lmp_hold_capable(hdev)) link_policy |= HCI_LP_HOLD; if 
(lmp_sniff_capable(hdev)) link_policy |= HCI_LP_SNIFF; if (lmp_park_capable(hdev)) link_policy |= HCI_LP_PARK; cp.policy = cpu_to_le16(link_policy); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) { if (!(hdev->commands[8] & 0x01)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) { if (!(hdev->commands[18] & 0x04) || !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_page_scan_type_sync(struct hci_dev *hdev) { /* Some older Broadcom based Bluetooth 1.2 controllers do not * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ if (!(hdev->commands[13] & 0x01) || test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read features beyond page 1 if available */ static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) { u8 page; int err; if (!lmp_ext_feat_capable(hdev)) return 0; for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; page++) { err = hci_read_local_ext_features_sync(hdev, page); if (err) return err; } return 0; } /* HCI Controller init stage 3 command sequence */ static const struct hci_init_stage hci_init3[] = { /* HCI_OP_SET_EVENT_MASK */ HCI_INIT(hci_set_event_mask_sync), /* HCI_OP_READ_STORED_LINK_KEY */ HCI_INIT(hci_read_stored_link_key_sync), /* HCI_OP_WRITE_DEF_LINK_POLICY */ HCI_INIT(hci_setup_link_policy_sync), /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ HCI_INIT(hci_read_page_scan_activity_sync), /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ HCI_INIT(hci_read_def_err_data_reporting_sync), /* HCI_OP_READ_PAGE_SCAN_TYPE */ HCI_INIT(hci_read_page_scan_type_sync), /* HCI_OP_READ_LOCAL_EXT_FEATURES */ HCI_INIT(hci_read_local_ext_features_all_sync), {} }; static int hci_le_set_event_mask_sync(struct hci_dev *hdev) { u8 events[8]; if (!lmp_le_capable(hdev)) return 0; memset(events, 0, sizeof(events)); if (hdev->le_features[0] & HCI_LE_ENCRYPTION) events[0] |= 0x10; /* LE Long Term Key Request */ /* If controller supports the Connection Parameters Request * Link Layer Procedure, enable the corresponding event. */ if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) /* LE Remote Connection Parameter Request */ events[0] |= 0x20; /* If the controller supports the Data Length Extension * feature, enable the corresponding event. */ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) events[0] |= 0x40; /* LE Data Length Change */ /* If the controller supports LL Privacy feature or LE Extended Adv, * enable the corresponding event. */ if (use_enhanced_conn_complete(hdev)) events[1] |= 0x02; /* LE Enhanced Connection Complete */ /* Mark Device Privacy if Privacy Mode is supported */ if (privacy_mode_capable(hdev)) hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; /* Mark Address Resolution if LL Privacy is supported */ if (ll_privacy_capable(hdev)) hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION; /* If the controller supports Extended Scanner Filter * Policies, enable the corresponding event. 
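* * (Throughout this function, checks of the form hdev->commands[n] & bit consult * the Read Local Supported Commands bitmap, where each octet/bit position marks * support for one specific HCI command, so an event is only unmasked when the * command that produces it is actually available.)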
*/ if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) events[1] |= 0x04; /* LE Direct Advertising Report */ /* If the controller supports Channel Selection Algorithm #2 * feature, enable the corresponding event. */ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) events[2] |= 0x08; /* LE Channel Selection Algorithm */ /* If the controller supports the LE Set Scan Enable command, * enable the corresponding advertising report event. */ if (hdev->commands[26] & 0x08) events[0] |= 0x02; /* LE Advertising Report */ /* If the controller supports the LE Create Connection * command, enable the corresponding event. */ if (hdev->commands[26] & 0x10) events[0] |= 0x01; /* LE Connection Complete */ /* If the controller supports the LE Connection Update * command, enable the corresponding event. */ if (hdev->commands[27] & 0x04) events[0] |= 0x04; /* LE Connection Update Complete */ /* If the controller supports the LE Read Remote Used Features * command, enable the corresponding event. */ if (hdev->commands[27] & 0x20) /* LE Read Remote Used Features Complete */ events[0] |= 0x08; /* If the controller supports the LE Read Local P-256 * Public Key command, enable the corresponding event. */ if (hdev->commands[34] & 0x02) /* LE Read Local P-256 Public Key Complete */ events[0] |= 0x80; /* If the controller supports the LE Generate DHKey * command, enable the corresponding event. */ if (hdev->commands[34] & 0x04) events[1] |= 0x01; /* LE Generate DHKey Complete */ /* If the controller supports the LE Set Default PHY or * LE Set PHY commands, enable the corresponding event. */ if (hdev->commands[35] & (0x20 | 0x40)) events[1] |= 0x08; /* LE PHY Update Complete */ /* If the controller supports LE Set Extended Scan Parameters * and LE Set Extended Scan Enable commands, enable the * corresponding event. */ if (use_ext_scan(hdev)) events[1] |= 0x10; /* LE Extended Advertising Report */ /* If the controller supports the LE Extended Advertising * command, enable the corresponding event. */ if (ext_adv_capable(hdev)) events[2] |= 0x02; /* LE Advertising Set Terminated */ if (cis_capable(hdev)) { events[3] |= 0x01; /* LE CIS Established */ if (cis_peripheral_capable(hdev)) events[3] |= 0x02; /* LE CIS Request */ } if (bis_capable(hdev)) { events[1] |= 0x20; /* LE PA Report */ events[1] |= 0x40; /* LE PA Sync Established */ events[3] |= 0x04; /* LE Create BIG Complete */ events[3] |= 0x08; /* LE Terminate BIG Complete */ events[3] |= 0x10; /* LE BIG Sync Established */ events[3] |= 0x20; /* LE BIG Sync Loss */ events[4] |= 0x02; /* LE BIG Info Advertising Report */ } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } /* Read LE Advertising Channel TX Power */ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) { if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { /* HCI TS spec forbids mixing of legacy and extended * advertising commands wherein READ_ADV_TX_POWER is * also included. So do not call it if extended adv * is supported otherwise controller will return * COMMAND_DISALLOWED for extended commands. 
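* On such controllers the transmit power can instead be obtained from the * Selected_TX_Power value returned by LE Set Extended Advertising Parameters, * so skipping the legacy read loses nothing.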
*/ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL, HCI_CMD_TIMEOUT); } return 0; } /* Read LE Min/Max Tx Power*/ static int hci_le_read_tx_power_sync(struct hci_dev *hdev) { if (!(hdev->commands[38] & 0x80) || test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Accept List Size */ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) { if (!(hdev->commands[26] & 0x40)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Resolving List Size */ static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) { if (!(hdev->commands[34] & 0x40)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Clear LE Resolving List */ static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) { if (!(hdev->commands[34] & 0x20)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, HCI_CMD_TIMEOUT); } /* Set RPA timeout */ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) { __le16 timeout = cpu_to_le16(hdev->rpa_timeout); if (!(hdev->commands[35] & 0x04) || test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, sizeof(timeout), &timeout, HCI_CMD_TIMEOUT); } /* Read LE Maximum Data Length */ static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) { if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Suggested Default Data Length */ static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) { if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Number of Supported Advertising Sets */ static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) { if (!ext_adv_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 0, NULL, HCI_CMD_TIMEOUT); } /* Write LE Host Supported */ static int hci_set_le_support_sync(struct hci_dev *hdev) { struct hci_cp_write_le_host_supported cp; /* LE-only devices do not support explicit enablement */ if (!lmp_bredr_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { cp.le = 0x01; cp.simul = 0x00; } if (cp.le == lmp_host_le_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* LE Set Host Feature */ static int hci_le_set_host_feature_sync(struct hci_dev *hdev) { struct hci_cp_le_set_host_feature cp; if (!cis_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); /* Connected Isochronous Channels (Host Support) */ cp.bit_number = 32; cp.bit_value = 1; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* LE Controller init stage 3 command sequence */ static const struct hci_init_stage le_init3[] = { /* HCI_OP_LE_SET_EVENT_MASK */ HCI_INIT(hci_le_set_event_mask_sync), /* HCI_OP_LE_READ_ADV_TX_POWER */ HCI_INIT(hci_le_read_adv_tx_power_sync), /* HCI_OP_LE_READ_TRANSMIT_POWER */ HCI_INIT(hci_le_read_tx_power_sync), /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ HCI_INIT(hci_le_read_accept_list_size_sync), /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ 
HCI_INIT(hci_le_clear_accept_list_sync), /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ HCI_INIT(hci_le_read_resolv_list_size_sync), /* HCI_OP_LE_CLEAR_RESOLV_LIST */ HCI_INIT(hci_le_clear_resolv_list_sync), /* HCI_OP_LE_SET_RPA_TIMEOUT */ HCI_INIT(hci_le_set_rpa_timeout_sync), /* HCI_OP_LE_READ_MAX_DATA_LEN */ HCI_INIT(hci_le_read_max_data_len_sync), /* HCI_OP_LE_READ_DEF_DATA_LEN */ HCI_INIT(hci_le_read_def_data_len_sync), /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ HCI_INIT(hci_le_read_num_support_adv_sets_sync), /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ HCI_INIT(hci_set_le_support_sync), /* HCI_OP_LE_SET_HOST_FEATURE */ HCI_INIT(hci_le_set_host_feature_sync), {} }; static int hci_init3_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_init_stage_sync(hdev, hci_init3); if (err) return err; if (lmp_le_capable(hdev)) return hci_init_stage_sync(hdev, le_init3); return 0; } static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) { struct hci_cp_delete_stored_link_key cp; /* Some Broadcom based Bluetooth controllers do not support the * Delete Stored Link Key command. They are clearly indicating its * absence in the bit mask of supported commands. * * Check the supported commands and only if the command is marked * as supported send it. If not supported assume that the controller * does not have actual support for stored link keys which makes this * command redundant anyway. * * Some controllers indicate that they support handling deleting * stored link keys, but they don't. The quirk lets a driver * just disable this command. */ if (!(hdev->commands[6] & 0x80) || test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, BDADDR_ANY); cp.delete_all = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) { u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; bool changed = false; /* Set event mask page 2 if the HCI command for it is supported */ if (!(hdev->commands[22] & 0x04)) return 0; /* If Connectionless Peripheral Broadcast central role is supported * enable all necessary events for it. */ if (lmp_cpb_central_capable(hdev)) { events[1] |= 0x40; /* Triggered Clock Capture */ events[1] |= 0x80; /* Synchronization Train Complete */ events[2] |= 0x08; /* Truncated Page Complete */ events[2] |= 0x20; /* CPB Channel Map Change */ changed = true; } /* If Connectionless Peripheral Broadcast peripheral role is supported * enable all necessary events for it. */ if (lmp_cpb_peripheral_capable(hdev)) { events[2] |= 0x01; /* Synchronization Train Received */ events[2] |= 0x02; /* CPB Receive */ events[2] |= 0x04; /* CPB Timeout */ events[2] |= 0x10; /* Peripheral Page Response Timeout */ changed = true; } /* Enable Authenticated Payload Timeout Expired event if supported */ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { events[2] |= 0x80; changed = true; } /* Some Broadcom based controllers indicate support for Set Event * Mask Page 2 command, but then actually do not support it. Since * the default value is all bits set to zero, the command is only * required if the event mask has to be changed. In case no change * to the event mask is needed, skip this command. 
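* * Page 2 uses the same 8-octet little-endian layout as page 1; the changed flag * simply records whether any bit was set at all, so the early return below avoids * sending an all-zero mask.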
*/ if (!changed) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events, HCI_CMD_TIMEOUT); } /* Read local codec list if the HCI command is supported */ static int hci_read_local_codecs_sync(struct hci_dev *hdev) { if (hdev->commands[45] & 0x04) hci_read_supported_codecs_v2(hdev); else if (hdev->commands[29] & 0x20) hci_read_supported_codecs(hdev); return 0; } /* Read local pairing options if the HCI command is supported */ static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) { if (!(hdev->commands[41] & 0x08)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL, HCI_CMD_TIMEOUT); } /* Get MWS transport configuration if the HCI command is supported */ static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) { if (!mws_transport_config_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL, HCI_CMD_TIMEOUT); } /* Check for Synchronization Train support */ static int hci_read_sync_train_params_sync(struct hci_dev *hdev) { if (!lmp_sync_train_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL, HCI_CMD_TIMEOUT); } /* Enable Secure Connections if supported and configured */ static int hci_write_sc_support_1_sync(struct hci_dev *hdev) { u8 support = 0x01; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || !bredr_sc_enabled(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, sizeof(support), &support, HCI_CMD_TIMEOUT); } /* Set erroneous data reporting if supported to the wideband speech * setting value */ static int hci_set_err_data_report_sync(struct hci_dev *hdev) { struct hci_cp_write_def_err_data_reporting cp; bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); if (!(hdev->commands[18] & 0x08) || !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) return 0; if (enabled == hdev->err_data_reporting) return 0; memset(&cp, 0, sizeof(cp)); cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED : ERR_DATA_REPORTING_DISABLED; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static const struct hci_init_stage hci_init4[] = { /* HCI_OP_DELETE_STORED_LINK_KEY */ HCI_INIT(hci_delete_stored_link_key_sync), /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ HCI_INIT(hci_set_event_mask_page_2_sync), /* HCI_OP_READ_LOCAL_CODECS */ HCI_INIT(hci_read_local_codecs_sync), /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ HCI_INIT(hci_read_local_pairing_opts_sync), /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ HCI_INIT(hci_get_mws_transport_config_sync), /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ HCI_INIT(hci_read_sync_train_params_sync), /* HCI_OP_WRITE_SC_SUPPORT */ HCI_INIT(hci_write_sc_support_1_sync), /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ HCI_INIT(hci_set_err_data_report_sync), {} }; /* Set Suggested Default Data Length to maximum if supported */ static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) { struct hci_cp_le_write_def_data_len cp; if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; memset(&cp, 0, sizeof(cp)); cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Set Default PHY parameters if command is supported, enables all supported * PHYs according to the LE Features bits. 
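* all_phys is left at 0x00 so that the host explicitly specifies both the TX and * the RX preference (bits 0 and 1 would mean no preference), and the * tx_phys/rx_phys masks below are then built from the 2M and Coded feature bits.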
*/ static int hci_le_set_default_phy_sync(struct hci_dev *hdev) { struct hci_cp_le_set_default_phy cp; if (!(hdev->commands[35] & 0x20)) { /* If the command is not supported it means only 1M PHY is * supported. */ hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; return 0; } memset(&cp, 0, sizeof(cp)); cp.all_phys = 0x00; cp.tx_phys = HCI_LE_SET_PHY_1M; cp.rx_phys = HCI_LE_SET_PHY_1M; /* Enables 2M PHY if supported */ if (le_2m_capable(hdev)) { cp.tx_phys |= HCI_LE_SET_PHY_2M; cp.rx_phys |= HCI_LE_SET_PHY_2M; } /* Enables Coded PHY if supported */ if (le_coded_capable(hdev)) { cp.tx_phys |= HCI_LE_SET_PHY_CODED; cp.rx_phys |= HCI_LE_SET_PHY_CODED; } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static const struct hci_init_stage le_init4[] = { /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ HCI_INIT(hci_le_set_write_def_data_len_sync), /* HCI_OP_LE_SET_DEFAULT_PHY */ HCI_INIT(hci_le_set_default_phy_sync), {} }; static int hci_init4_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_init_stage_sync(hdev, hci_init4); if (err) return err; if (lmp_le_capable(hdev)) return hci_init_stage_sync(hdev, le_init4); return 0; } static int hci_init_sync(struct hci_dev *hdev) { int err; err = hci_init1_sync(hdev); if (err < 0) return err; if (hci_dev_test_flag(hdev, HCI_SETUP)) hci_debugfs_create_basic(hdev); err = hci_init2_sync(hdev); if (err < 0) return err; err = hci_init3_sync(hdev); if (err < 0) return err; err = hci_init4_sync(hdev); if (err < 0) return err; /* This function is only called when the controller is actually in * configured state. When the controller is marked as unconfigured, * this initialization procedure is not run. * * It means that it is possible that a controller runs through its * setup phase and then discovers missing settings. If that is the * case, then this function will not be called. It then will only * be called during the config phase. * * So only when in setup phase or config phase, create the debugfs * entries and register the SMP channels. 
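* * The HCI_DEBUGFS_CREATED test-and-set below additionally makes sure the debugfs * hierarchy is only created once, even if the init sequence is executed more than * once.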
*/ if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) return 0; if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) return 0; hci_debugfs_create_common(hdev); if (lmp_bredr_capable(hdev)) hci_debugfs_create_bredr(hdev); if (lmp_le_capable(hdev)) hci_debugfs_create_le(hdev); return 0; } #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } static const struct { unsigned long quirk; const char *desc; } hci_broken_table[] = { HCI_QUIRK_BROKEN(LOCAL_COMMANDS, "HCI Read Local Supported Commands not supported"), HCI_QUIRK_BROKEN(STORED_LINK_KEY, "HCI Delete Stored Link Key command is advertised, " "but not supported."), HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, "HCI Read Default Erroneous Data Reporting command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, "HCI Read Transmit Power Level command is advertised, " "but not supported."), HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, "HCI Set Event Filter command not supported."), HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, "HCI Enhanced Setup Synchronous Connection command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, "HCI LE Set Random Private Address Timeout command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(EXT_CREATE_CONN, "HCI LE Extended Create Connection command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT, "HCI WRITE AUTH PAYLOAD TIMEOUT command leads " "to unexpected SMP errors when pairing " "and will not be used."), HCI_QUIRK_BROKEN(LE_CODED, "HCI LE Coded PHY feature bit is set, " "but its usage is not supported.") }; /* This function handles hdev setup stage: * * Calls hdev->setup * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. */ static int hci_dev_setup_sync(struct hci_dev *hdev) { int ret = 0; bool invalid_bdaddr; size_t i; if (!hci_dev_test_flag(hdev, HCI_SETUP) && !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) return 0; bt_dev_dbg(hdev, ""); hci_sock_dev_event(hdev, HCI_DEV_SETUP); if (hdev->setup) ret = hdev->setup(hdev); for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); } /* The transport driver can set the quirk to mark the * BD_ADDR invalid before creating the HCI device or in * its setup callback. */ invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); if (!ret) { if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && !bacmp(&hdev->public_addr, BDADDR_ANY)) hci_dev_get_bd_addr_from_property(hdev); if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) && hdev->set_bdaddr) { ret = hdev->set_bdaddr(hdev, &hdev->public_addr); if (!ret) invalid_bdaddr = false; } } /* The transport driver can set these quirks before * creating the HCI device or in its setup callback. * * For the invalid BD_ADDR quirk it is possible that * it becomes a valid address if the bootloader does * provide it (see above). * * In case any of them is set, the controller has to * start up as unconfigured. */ if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || invalid_bdaddr) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* For an unconfigured controller it is required to * read at least the version information provided by * the Read Local Version Information command. 
* * If the set_bdaddr driver callback is provided, then * also the original Bluetooth public device address * will be read using the Read BD Address command. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return hci_unconf_init_sync(hdev); return ret; } /* This function handles hdev init stage: * * Calls hci_dev_setup_sync to perform setup stage * Calls hci_init_sync to perform HCI command init sequence */ static int hci_dev_init_sync(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); atomic_set(&hdev->cmd_cnt, 1); set_bit(HCI_INIT, &hdev->flags); ret = hci_dev_setup_sync(hdev); if (hci_dev_test_flag(hdev, HCI_CONFIG)) { /* If public address change is configured, ensure that * the address gets programmed. If the driver does not * support changing the public address, fail the power * on procedure. */ if (bacmp(&hdev->public_addr, BDADDR_ANY) && hdev->set_bdaddr) ret = hdev->set_bdaddr(hdev, &hdev->public_addr); else ret = -EADDRNOTAVAIL; } if (!ret) { if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { ret = hci_init_sync(hdev); if (!ret && hdev->post_init) ret = hdev->post_init(hdev); } } /* If the HCI Reset command is clearing all diagnostic settings, * then they need to be reprogrammed after the init procedure * completed. */ if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) ret = hdev->set_diag(hdev, true); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { msft_do_open(hdev); aosp_do_open(hdev); } clear_bit(HCI_INIT, &hdev->flags); return ret; } int hci_dev_open_sync(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { ret = -ENODEV; goto done; } if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { /* Check for rfkill but allow the HCI setup stage to * proceed (which in itself doesn't cause any RF activity). */ if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { ret = -ERFKILL; goto done; } /* Check for valid public address or a configured static * random address, but let the HCI setup proceed to * be able to determine if there is a public address * or not. * * In case of user channel usage, it is not important * if a public address or static random address is * available. 
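* * (BDADDR_ANY is the all-zero address 00:00:00:00:00:00, so the bacmp() calls * below simply test whether the respective address has been set.)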
*/ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY)) { ret = -EADDRNOTAVAIL; goto done; } } if (test_bit(HCI_UP, &hdev->flags)) { ret = -EALREADY; goto done; } if (hdev->open(hdev)) { ret = -EIO; goto done; } hci_devcd_reset(hdev); set_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_OPEN); ret = hci_dev_init_sync(hdev); if (!ret) { hci_dev_hold(hdev); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, true); set_bit(HCI_UP, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_UP); hci_leds_update_powered(hdev, true); if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG) && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_MGMT)) { ret = hci_powered_update_sync(hdev); mgmt_power_on(hdev, ret); } } else { /* Init failed, cleanup */ flush_work(&hdev->tx_work); /* Since hci_rx_work() is possible to awake new cmd_work * it should be flushed first to avoid unexpected call of * hci_cmd_work() */ flush_work(&hdev->rx_work); flush_work(&hdev->cmd_work); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->rx_q); if (hdev->flush) hdev->flush(hdev); if (hdev->sent_cmd) { cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } if (hdev->req_skb) { kfree_skb(hdev->req_skb); hdev->req_skb = NULL; } clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); hdev->close(hdev); hdev->flags &= BIT(HCI_RAW); } done: return ret; } /* This function requires the caller holds hdev->lock */ static void hci_pend_le_actions_clear(struct hci_dev *hdev) { struct hci_conn_params *p; list_for_each_entry(p, &hdev->le_conn_params, list) { hci_pend_le_list_del_init(p); if (p->conn) { hci_conn_drop(p->conn); hci_conn_put(p->conn); p->conn = NULL; } } BT_DBG("All LE pending actions cleared"); } static int hci_dev_shutdown(struct hci_dev *hdev) { int err = 0; /* Similar to how we first do setup and then set the exclusive access * bit for userspace, we must first unset userchannel and then clean up. * Otherwise, the kernel can't properly use the hci channel to clean up * the controller (some shutdown routines require sending additional * commands to the controller for example). 
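* * was_userchannel below therefore remembers the original state so that the flag * can be put back once the vendor shutdown hook has run.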
*/ bool was_userchannel = hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && test_bit(HCI_UP, &hdev->flags)) { /* Execute vendor specific shutdown routine */ if (hdev->shutdown) err = hdev->shutdown(hdev); } if (was_userchannel) hci_dev_set_flag(hdev, HCI_USER_CHANNEL); return err; } int hci_dev_close_sync(struct hci_dev *hdev) { bool auto_off; int err = 0; bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { disable_delayed_work(&hdev->power_off); disable_delayed_work(&hdev->ncmd_timer); disable_delayed_work(&hdev->le_scan_disable); } else { cancel_delayed_work(&hdev->power_off); cancel_delayed_work(&hdev->ncmd_timer); cancel_delayed_work(&hdev->le_scan_disable); } hci_cmd_sync_cancel_sync(hdev, ENODEV); cancel_interleave_scan(hdev); if (hdev->adv_instance_timeout) { cancel_delayed_work_sync(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } err = hci_dev_shutdown(hdev); if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { cancel_delayed_work_sync(&hdev->cmd_timer); return err; } hci_leds_update_powered(hdev, false); /* Flush RX and TX works */ flush_work(&hdev->tx_work); flush_work(&hdev->rx_work); if (hdev->discov_timeout > 0) { hdev->discov_timeout = 0; hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); } if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) cancel_delayed_work(&hdev->service_cache); if (hci_dev_test_flag(hdev, HCI_MGMT)) { struct adv_info *adv_instance; cancel_delayed_work_sync(&hdev->rpa_expired); list_for_each_entry(adv_instance, &hdev->adv_instances, list) cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); } /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ drain_workqueue(hdev->workqueue); hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_MGMT)) __mgmt_power_off(hdev); hci_inquiry_cache_flush(hdev); hci_pend_le_actions_clear(hdev); hci_conn_hash_flush(hdev); /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ smp_unregister(hdev); hci_dev_unlock(hdev); hci_sock_dev_event(hdev, HCI_DEV_DOWN); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { aosp_do_close(hdev); msft_do_close(hdev); } if (hdev->flush) hdev->flush(hdev); /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { set_bit(HCI_INIT, &hdev->flags); hci_reset_sync(hdev); clear_bit(HCI_INIT, &hdev->flags); } /* flush cmd work */ flush_work(&hdev->cmd_work); /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->raw_q); /* Drop last sent command */ if (hdev->sent_cmd) { cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } /* Drop last request */ if (hdev->req_skb) { kfree_skb(hdev->req_skb); hdev->req_skb = NULL; } clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); /* After this point our queues are empty and no tasks are scheduled. 
*/ hdev->close(hdev); /* Clear flags */ hdev->flags &= BIT(HCI_RAW); hci_dev_clear_volatile_flags(hdev); memset(hdev->eir, 0, sizeof(hdev->eir)); memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); bacpy(&hdev->random_addr, BDADDR_ANY); hci_codec_list_clear(&hdev->local_codecs); hci_dev_put(hdev); return err; } /* This function performs the power on HCI command sequence as follows: * * If the controller is already up (HCI_UP), it performs the hci_powered_update_sync * sequence; otherwise it runs hci_dev_open_sync, which is followed by * hci_powered_update_sync after the init sequence is completed. */ static int hci_power_on_sync(struct hci_dev *hdev) { int err; if (test_bit(HCI_UP, &hdev->flags) && hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); return hci_powered_update_sync(hdev); } err = hci_dev_open_sync(hdev); if (err < 0) return err; /* During the HCI setup phase, a few error conditions are * ignored and they need to be checked now. If they are still * valid, it is important to return the device back off. */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_close_sync(hdev); } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); } if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) set_bit(HCI_RAW, &hdev->flags); /* For fully configured devices, this will send * the Index Added event. For unconfigured devices, * it will send the Unconfigured Index Added event. * * Devices with HCI_QUIRK_RAW_DEVICE are ignored * and no event will be sent. */ mgmt_index_added(hdev); } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* Once the controller is configured, it * is important to clear the HCI_RAW flag. */ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) clear_bit(HCI_RAW, &hdev->flags); /* Powering on the controller with HCI_CONFIG set only * happens with the transition from unconfigured to * configured. This will send the Index Added event.
*/ mgmt_index_added(hdev); } return 0; } static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) { struct hci_cp_remote_name_req_cancel cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, addr); return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_stop_discovery_sync(struct hci_dev *hdev) { struct discovery_state *d = &hdev->discovery; struct inquiry_entry *e; int err; bt_dev_dbg(hdev, "state %u", hdev->discovery.state); if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { if (test_bit(HCI_INQUIRY, &hdev->flags)) { err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); if (err) return err; } if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); err = hci_scan_disable_sync(hdev); if (err) return err; } } else { err = hci_scan_disable_sync(hdev); if (err) return err; } /* Resume advertising if it was paused */ if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* No further actions needed for LE-only discovery */ if (d->type == DISCOV_TYPE_LE) return 0; if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING); if (!e) return 0; /* Ignore cancel errors since it should interfere with stopping * of the discovery. */ hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); } return 0; } static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_disconnect cp; if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) { /* This is a BIS connection, hci_conn_del will * do the necessary cleanup. */ hci_dev_lock(hdev); hci_conn_failed(conn, reason); hci_dev_unlock(hdev); return 0; } memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is * used when suspending or powering off, where we don't want to wait * for the peer's response. */ if (reason != HCI_ERROR_REMOTE_POWER_OFF) return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, HCI_EV_DISCONN_COMPLETE, HCI_CMD_TIMEOUT, NULL); return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { /* Return reason if scanning since the connection shall probably be * cleanup directly. */ if (test_bit(HCI_CONN_SCANNING, &conn->flags)) return reason; if (conn->role == HCI_ROLE_SLAVE || test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { if (conn->type == LE_LINK) return hci_le_connect_cancel_sync(hdev, conn, reason); if (conn->type == CIS_LINK) { /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 1857: * * If this command is issued for a CIS on the Central and the * CIS is successfully terminated before being established, * then an HCI_LE_CIS_Established event shall also be sent for * this CIS with the Status Operation Cancelled by Host (0x44). 
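* * In other words: once Create CIS has actually been issued the CIS is torn down * with a regular Disconnect below, while a CIS that never got a Create CIS has * nothing pending in the controller and is simply failed locally with * HCI_ERROR_LOCAL_HOST_TERM.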
*/ if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) return hci_disconnect_sync(hdev, conn, reason); /* CIS with no Create CIS sent have nothing to cancel */ return HCI_ERROR_LOCAL_HOST_TERM; } if (conn->type == BIS_LINK) { /* There is no way to cancel a BIS without terminating the BIG * which is done later on connection cleanup. */ return 0; } if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is * used when suspending or powering off, where we don't want to wait * for the peer's response. */ if (reason != HCI_ERROR_REMOTE_POWER_OFF) return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst, HCI_EV_CONN_COMPLETE, HCI_CMD_TIMEOUT, NULL); return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst, HCI_CMD_TIMEOUT); } static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_reject_sync_conn_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.reason = reason; /* SCO rejection has its own limited set of * allowed error values (0x0D-0x0F). */ if (reason < 0x0d || reason > 0x0f) cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_le_reject_cis cp; memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_reject_conn_req cp; if (conn->type == CIS_LINK) return hci_le_reject_cis_sync(hdev, conn, reason); if (conn->type == BIS_LINK) return -EINVAL; if (conn->type == SCO_LINK || conn->type == ESCO_LINK) return hci_reject_sco_sync(hdev, conn, reason); memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { int err = 0; u16 handle = conn->handle; bool disconnect = false; struct hci_conn *c; switch (conn->state) { case BT_CONNECTED: case BT_CONFIG: err = hci_disconnect_sync(hdev, conn, reason); break; case BT_CONNECT: err = hci_connect_cancel_sync(hdev, conn, reason); break; case BT_CONNECT2: err = hci_reject_conn_sync(hdev, conn, reason); break; case BT_OPEN: case BT_BOUND: break; default: disconnect = true; break; } hci_dev_lock(hdev); /* Check if the connection has been cleaned up concurrently */ c = hci_conn_hash_lookup_handle(hdev, handle); if (!c || c != conn) { err = 0; goto unlock; } /* Cleanup hci_conn object if it cannot be cancelled as it * likelly means the controller and host stack are out of sync * or in case of LE it was still scanning so it can be cleanup * safely. 
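* * To summarise the switch above: BT_CONNECTED/BT_CONFIG use HCI Disconnect, * BT_CONNECT uses the appropriate Create Connection Cancel, BT_CONNECT2 rejects * the incoming request, and BT_OPEN/BT_BOUND need no HCI traffic at all.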
*/ if (disconnect) { conn->state = BT_CLOSED; hci_disconn_cfm(conn, reason); hci_conn_del(conn); } else { hci_conn_failed(conn, reason); } unlock: hci_dev_unlock(hdev); return err; } static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) { struct list_head *head = &hdev->conn_hash.list; struct hci_conn *conn; rcu_read_lock(); while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) { /* Make sure the connection is not freed while unlocking */ conn = hci_conn_get(conn); rcu_read_unlock(); /* Disregard possible errors since hci_conn_del shall have been * called even in case of errors had occurred since it would * then cause hci_conn_failed to be called which calls * hci_conn_del internally. */ hci_abort_conn_sync(hdev, conn, reason); hci_conn_put(conn); rcu_read_lock(); } rcu_read_unlock(); return 0; } /* This function perform power off HCI command sequence as follows: * * Clear Advertising * Stop Discovery * Disconnect all connections * hci_dev_close_sync */ static int hci_power_off_sync(struct hci_dev *hdev) { int err; /* If controller is already down there is nothing to do */ if (!test_bit(HCI_UP, &hdev->flags)) return 0; hci_dev_set_flag(hdev, HCI_POWERING_DOWN); if (test_bit(HCI_ISCAN, &hdev->flags) || test_bit(HCI_PSCAN, &hdev->flags)) { err = hci_write_scan_enable_sync(hdev, 0x00); if (err) goto out; } err = hci_clear_adv_sync(hdev, NULL, false); if (err) goto out; err = hci_stop_discovery_sync(hdev); if (err) goto out; /* Terminated due to Power Off */ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); if (err) goto out; err = hci_dev_close_sync(hdev); out: hci_dev_clear_flag(hdev, HCI_POWERING_DOWN); return err; } int hci_set_powered_sync(struct hci_dev *hdev, u8 val) { if (val) return hci_power_on_sync(hdev); return hci_power_off_sync(hdev); } static int hci_write_iac_sync(struct hci_dev *hdev) { struct hci_cp_write_current_iac_lap cp; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) return 0; memset(&cp, 0, sizeof(cp)); if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { /* Limited discoverable mode */ cp.num_iac = min_t(u8, hdev->num_iac, 2); cp.iac_lap[0] = 0x00; /* LIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; cp.iac_lap[3] = 0x33; /* GIAC */ cp.iac_lap[4] = 0x8b; cp.iac_lap[5] = 0x9e; } else { /* General discoverable mode */ cp.num_iac = 1; cp.iac_lap[0] = 0x33; /* GIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; } return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, (cp.num_iac * 3) + 1, &cp, HCI_CMD_TIMEOUT); } int hci_update_discoverable_sync(struct hci_dev *hdev) { int err = 0; if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = hci_write_iac_sync(hdev); if (err) return err; err = hci_update_scan_sync(hdev); if (err) return err; err = hci_update_class_sync(hdev); if (err) return err; } /* Advertising instances don't use the global discoverable setting, so * only update AD if advertising was enabled using Set Advertising. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { err = hci_update_adv_data_sync(hdev, 0x00); if (err) return err; /* Discoverable mode affects the local advertising * address in limited privacy mode. 
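* (With HCI_LIMITED_PRIVACY the own address used while discoverable differs from * the one used otherwise, so advertising is re-programmed here for the change to * take effect.)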
*/ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { if (ext_adv_capable(hdev)) err = hci_start_ext_adv_sync(hdev, 0x00); else err = hci_enable_advertising_sync(hdev); } } return err; } static int update_discoverable_sync(struct hci_dev *hdev, void *data) { return hci_update_discoverable_sync(hdev); } int hci_update_discoverable(struct hci_dev *hdev) { /* Only queue if it would have any effect */ if (hdev_is_powered(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING) && hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, NULL); return 0; } int hci_update_connectable_sync(struct hci_dev *hdev) { int err; err = hci_update_scan_sync(hdev); if (err) return err; /* If BR/EDR is not enabled and we disable advertising as a * by-product of disabling connectable, we need to update the * advertising flags. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); /* Update the advertising parameters if necessary */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) err = hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance); else err = hci_enable_advertising_sync(hdev); if (err) return err; } return hci_update_passive_scan_sync(hdev); } int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp) { const u8 giac[3] = { 0x33, 0x8b, 0x9e }; const u8 liac[3] = { 0x00, 0x8b, 0x9e }; struct hci_cp_inquiry cp; bt_dev_dbg(hdev, ""); if (test_bit(HCI_INQUIRY, &hdev->flags)) return 0; hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_dev_unlock(hdev); memset(&cp, 0, sizeof(cp)); if (hdev->discovery.limited) memcpy(&cp.lap, liac, sizeof(cp.lap)); else memcpy(&cp.lap, giac, sizeof(cp.lap)); cp.length = length; cp.num_rsp = num_rsp; return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) { u8 own_addr_type; /* Accept list is not used for discovery */ u8 filter_policy = 0x00; /* Default is to enable duplicates filter */ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; int err; bt_dev_dbg(hdev, ""); /* If controller is scanning, it means the passive scanning is * running. Thus, we should temporarily stop it in order to set the * discovery scanning parameters. */ err = hci_scan_disable_sync(hdev); if (err) { bt_dev_err(hdev, "Unable to disable scanning: %d", err); return err; } cancel_interleave_scan(hdev); /* Pause address resolution for active scan and stop advertising if * privacy is enabled. */ err = hci_pause_addr_resolution(hdev); if (err) goto failed; /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable * private address. */ err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev), &own_addr_type); if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; if (hci_is_adv_monitoring(hdev) || (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && hdev->discovery.result_filtering)) { /* Duplicate filter should be disabled when some advertisement * monitor is activated, otherwise AdvMon can only receive one * advertisement for one peer(*) during active scanning, and * might report loss to these peers. 
* * If the controller does strict duplicate filtering and the * discovery requires result filtering, disable controller based * filtering since that can cause reports that would match the * host filter to not be reported. */ filter_dup = LE_SCAN_FILTER_DUP_DISABLE; } err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, hdev->le_scan_window_discovery, own_addr_type, filter_policy, filter_dup); if (!err) return err; failed: /* Resume advertising if it was paused */ if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* Resume passive scanning */ hci_update_passive_scan_sync(hdev); return err; } static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); if (err) return err; return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); } int hci_start_discovery_sync(struct hci_dev *hdev) { unsigned long timeout; int err; bt_dev_dbg(hdev, "type %u", hdev->discovery.type); switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); case DISCOV_TYPE_INTERLEAVED: /* When running simultaneous discovery, the LE scanning time * should occupy the whole discovery time since BR/EDR inquiry * and LE scanning are scheduled by the controller. * * For interleaving discovery in comparison, BR/EDR inquiry * and LE scanning are done sequentially with separate * timeouts. */ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); /* During simultaneous discovery, we double LE scan * interval. We must leave some time for the controller * to do BR/EDR inquiry. */ err = hci_start_interleaved_discovery_sync(hdev); break; } timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); break; case DISCOV_TYPE_LE: timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); break; default: return -EINVAL; } if (err) return err; bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); return 0; } static void hci_suspend_monitor_sync(struct hci_dev *hdev) { switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_MSFT: msft_suspend_sync(hdev); break; default: return; } } /* This function disables discovery and marks it as paused */ static int hci_pause_discovery_sync(struct hci_dev *hdev) { int old_state = hdev->discovery.state; int err; /* If discovery is already stopped/stopping/paused there is nothing to do */ if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || hdev->discovery_paused) return 0; hci_discovery_set_state(hdev, DISCOVERY_STOPPING); err = hci_stop_discovery_sync(hdev); if (err) return err; hdev->discovery_paused = true; hci_discovery_set_state(hdev, DISCOVERY_STOPPED); return 0; } static int hci_update_event_filter_sync(struct hci_dev *hdev) { struct bdaddr_list_with_flags *b; u8 scan = SCAN_DISABLED; bool scanning = test_bit(HCI_PSCAN, &hdev->flags); int err; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; /* Some fake CSR controllers lock up after setting this type of * filter, so avoid sending the request altogether.
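* * For controllers that do cope with it, the loop below installs one Connection * Setup filter per accept-list entry flagged for remote wakeup with auto-accept * enabled, and then toggles page scan so that it matches whether any filter was * actually installed.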
*/ if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; /* Always clear event filter when starting */ hci_clear_event_filter_sync(hdev); list_for_each_entry(b, &hdev->accept_list, list) { if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) continue; bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, HCI_CONN_SETUP_ALLOW_BDADDR, &b->bdaddr, HCI_CONN_SETUP_AUTO_ON); if (err) bt_dev_dbg(hdev, "Failed to set event filter for %pMR", &b->bdaddr); else scan = SCAN_PAGE; } if (scan && !scanning) hci_write_scan_enable_sync(hdev, scan); else if (!scan && scanning) hci_write_scan_enable_sync(hdev, scan); return 0; } /* This function disables scan (BR and LE) and mark it as paused */ static int hci_pause_scan_sync(struct hci_dev *hdev) { if (hdev->scanning_paused) return 0; /* Disable page scan if enabled */ if (test_bit(HCI_PSCAN, &hdev->flags)) hci_write_scan_enable_sync(hdev, SCAN_DISABLED); hci_scan_disable_sync(hdev); hdev->scanning_paused = true; return 0; } /* This function performs the HCI suspend procedures in the follow order: * * Pause discovery (active scanning/inquiry) * Pause Directed Advertising/Advertising * Pause Scanning (passive scanning in case discovery was not active) * Disconnect all connections * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup * otherwise: * Update event mask (only set events that are allowed to wake up the host) * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) * Update passive scanning (lower duty cycle) * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE */ int hci_suspend_sync(struct hci_dev *hdev) { int err; /* If marked as suspended there nothing to do */ if (hdev->suspended) return 0; /* Mark device as suspended */ hdev->suspended = true; /* Pause discovery if not already stopped */ hci_pause_discovery_sync(hdev); /* Pause other advertisements */ hci_pause_advertising_sync(hdev); /* Suspend monitor filters */ hci_suspend_monitor_sync(hdev); /* Prevent disconnects from causing scanning to be re-enabled */ hci_pause_scan_sync(hdev); if (hci_conn_count(hdev)) { /* Soft disconnect everything (power off) */ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); if (err) { /* Set state to BT_RUNNING so resume doesn't notify */ hdev->suspend_state = BT_RUNNING; hci_resume_sync(hdev); return err; } /* Update event mask so only the allowed event can wakeup the * host. */ hci_set_event_mask_sync(hdev); } /* Only configure accept list if disconnect succeeded and wake * isn't being prevented. */ if (!hdev->wakeup || !hdev->wakeup(hdev)) { hdev->suspend_state = BT_SUSPEND_DISCONNECT; return 0; } /* Unpause to take care of updating scanning params */ hdev->scanning_paused = false; /* Enable event filter for paired devices */ hci_update_event_filter_sync(hdev); /* Update LE passive scan if enabled */ hci_update_passive_scan_sync(hdev); /* Pause scan changes again. */ hdev->scanning_paused = true; hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; return 0; } /* This function resumes discovery */ static int hci_resume_discovery_sync(struct hci_dev *hdev) { int err; /* If discovery not paused there nothing to do */ if (!hdev->discovery_paused) return 0; hdev->discovery_paused = false; hci_discovery_set_state(hdev, DISCOVERY_STARTING); err = hci_start_discovery_sync(hdev); hci_discovery_set_state(hdev, err ? 
DISCOVERY_STOPPED : DISCOVERY_FINDING); return err; } static void hci_resume_monitor_sync(struct hci_dev *hdev) { switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_MSFT: msft_resume_sync(hdev); break; default: return; } } /* This function resumes scan and resets the paused flag */ static int hci_resume_scan_sync(struct hci_dev *hdev) { if (!hdev->scanning_paused) return 0; hdev->scanning_paused = false; hci_update_scan_sync(hdev); /* Reset passive scanning to normal */ hci_update_passive_scan_sync(hdev); return 0; } /* This function performs the HCI resume procedures in the following order: * * Restore event mask * Clear event filter * Update passive scanning (normal duty cycle) * Resume Directed Advertising/Advertising * Resume discovery (active scanning/inquiry) */ int hci_resume_sync(struct hci_dev *hdev) { /* If not marked as suspended there is nothing to do */ if (!hdev->suspended) return 0; hdev->suspended = false; /* Restore event mask */ hci_set_event_mask_sync(hdev); /* Clear any event filters and restore scan state */ hci_clear_event_filter_sync(hdev); /* Resume scanning */ hci_resume_scan_sync(hdev); /* Resume monitor filters */ hci_resume_monitor_sync(hdev); /* Resume other advertisements */ hci_resume_advertising_sync(hdev); /* Resume discovery */ hci_resume_discovery_sync(hdev); return 0; } static bool conn_use_rpa(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_PRIVACY); } static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_set_ext_adv_params cp; int err; bdaddr_t random_addr; u8 own_addr_type; err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), &own_addr_type); if (err) return err; /* Set require_privacy to false so that the remote device has a * chance of identifying us. */ err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, &own_addr_type, &random_addr); if (err) return err; memset(&cp, 0, sizeof(cp)); cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); cp.channel_map = hdev->le_adv_channel_map; cp.tx_power = HCI_TX_POWER_INVALID; cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_1M; cp.handle = 0x00; /* Use instance 0 for directed adv */ cp.own_addr_type = own_addr_type; cp.peer_addr_type = conn->dst_type; bacpy(&cp.peer_addr, &conn->dst); /* As per Core Spec 5.2 Vol 4, Part E, Sec 7.8.53, the * advertising_event_property LE_LEGACY_ADV_DIRECT_IND * does not support advertising data; when the advertising set already * contains some, the controller shall return the error code 'Invalid * HCI Command Parameters' (0x12). * So it is required to remove the adv set for handle 0x00, since we use * instance 0 for directed adv.
*/ err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL); if (err) return err; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; /* Check if random address need to be updated */ if (own_addr_type == ADDR_LE_DEV_RANDOM && bacmp(&random_addr, BDADDR_ANY) && bacmp(&random_addr, &hdev->random_addr)) { err = hci_set_adv_set_random_addr_sync(hdev, 0x00, &random_addr); if (err) return err; } return hci_enable_ext_advertising_sync(hdev, 0x00); } static int hci_le_directed_advertising_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_set_adv_param cp; u8 status; u8 own_addr_type; u8 enable; if (ext_adv_capable(hdev)) return hci_le_ext_directed_advertising_sync(hdev, conn); /* Clear the HCI_LE_ADV bit temporarily so that the * hci_update_random_address knows that it's safe to go ahead * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to false so that the remote device has a * chance of identifying us. */ status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), &own_addr_type); if (status) return status; memset(&cp, 0, sizeof(cp)); /* Some controllers might reject command if intervals are not * within range for undirected advertising. * BCM20702A0 is known to be affected by this. */ cp.min_interval = cpu_to_le16(0x0020); cp.max_interval = cpu_to_le16(0x0020); cp.type = LE_ADV_DIRECT_IND; cp.own_address_type = own_addr_type; cp.direct_addr_type = conn->dst_type; bacpy(&cp.direct_addr, &conn->dst); cp.channel_map = hdev->le_adv_channel_map; status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (status) return status; enable = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); } static void set_ext_conn_params(struct hci_conn *conn, struct hci_cp_le_ext_conn_param *p) { struct hci_dev *hdev = conn->hdev; memset(p, 0, sizeof(*p)); p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); p->conn_latency = cpu_to_le16(conn->le_conn_latency); p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); p->min_ce_len = cpu_to_le16(0x0000); p->max_ce_len = cpu_to_le16(0x0000); } static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 own_addr_type) { struct hci_cp_le_ext_create_conn *cp; struct hci_cp_le_ext_conn_param *p; u8 data[sizeof(*cp) + sizeof(*p) * 3]; u32 plen; cp = (void *)data; p = (void *)cp->data; memset(cp, 0, sizeof(*cp)); bacpy(&cp->peer_addr, &conn->dst); cp->peer_addr_type = conn->dst_type; cp->own_addr_type = own_addr_type; plen = sizeof(*cp); if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M || conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) { cp->phys |= LE_SCAN_PHY_1M; set_ext_conn_params(conn, p); p++; plen += sizeof(*p); } if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M || conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) { cp->phys |= LE_SCAN_PHY_2M; set_ext_conn_params(conn, p); p++; plen += sizeof(*p); } if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED || conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) { cp->phys |= LE_SCAN_PHY_CODED; set_ext_conn_params(conn, p); plen += sizeof(*p); } return 
__hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, plen, data, HCI_EV_LE_ENHANCED_CONN_COMPLETE, conn->conn_timeout, NULL); } static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data) { struct hci_cp_le_create_conn cp; struct hci_conn_params *params; u8 own_addr_type; int err; struct hci_conn *conn = data; if (!hci_conn_valid(hdev, conn)) return -ECANCELED; bt_dev_dbg(hdev, "conn %p", conn); clear_bit(HCI_CONN_SCANNING, &conn->flags); conn->state = BT_CONNECT; /* If requested to connect as peripheral use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { /* If we're active scanning and simultaneous roles is not * enabled simply reject the attempt. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE && !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) { hci_conn_del(conn); return -EBUSY; } /* Pause advertising while doing directed advertising. */ hci_pause_advertising_sync(hdev); err = hci_le_directed_advertising_sync(hdev, conn); goto done; } /* Disable advertising if simultaneous roles is not in use. */ if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) hci_pause_advertising_sync(hdev); params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { conn->le_conn_min_interval = params->conn_min_interval; conn->le_conn_max_interval = params->conn_max_interval; conn->le_conn_latency = params->conn_latency; conn->le_supv_timeout = params->supervision_timeout; } else { conn->le_conn_min_interval = hdev->le_conn_min_interval; conn->le_conn_max_interval = hdev->le_conn_max_interval; conn->le_conn_latency = hdev->le_conn_latency; conn->le_supv_timeout = hdev->le_supv_timeout; } /* If controller is scanning, we stop it since some controllers are * not able to scan and connect at the same time. Also set the * HCI_LE_SCAN_INTERRUPTED flag so that the command complete * handler for scan disabling knows to set the correct discovery * state. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_scan_disable_sync(hdev); hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); } /* Update random address, but set require_privacy to false so * that we never connect with an non-resolvable address. */ err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), &own_addr_type); if (err) goto done; /* Send command LE Extended Create Connection if supported */ if (use_ext_conn(hdev)) { err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type); goto done; } memset(&cp, 0, sizeof(cp)); cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; cp.own_address_type = own_addr_type; cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); cp.conn_latency = cpu_to_le16(conn->le_conn_latency); cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261: * * If this event is unmasked and the HCI_LE_Connection_Complete event * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is * sent when a new connection has been created. */ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp, use_enhanced_conn_complete(hdev) ? 
HCI_EV_LE_ENHANCED_CONN_COMPLETE : HCI_EV_LE_CONN_COMPLETE, conn->conn_timeout, NULL); done: if (err == -ETIMEDOUT) hci_le_connect_cancel_sync(hdev, conn, 0x00); /* Re-enable advertising after the connection attempt is finished. */ hci_resume_advertising_sync(hdev); return err; } int hci_le_create_cis_sync(struct hci_dev *hdev) { DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f); size_t aux_num_cis = 0; struct hci_conn *conn; u8 cig = BT_ISO_QOS_CIG_UNSET; /* The spec allows only one pending LE Create CIS command at a time. If * the command is pending now, don't do anything. We check for pending * connections after each CIS Established event. * * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 2566: * * If the Host issues this command before all the * HCI_LE_CIS_Established events from the previous use of the * command have been generated, the Controller shall return the * error code Command Disallowed (0x0C). * * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 2567: * * When the Controller receives the HCI_LE_Create_CIS command, the * Controller sends the HCI_Command_Status event to the Host. An * HCI_LE_CIS_Established event will be generated for each CIS when it * is established or if it is disconnected or considered lost before * being established; until all the events are generated, the command * remains pending. */ hci_dev_lock(hdev); rcu_read_lock(); /* Wait until previous Create CIS has completed */ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) goto done; } /* Find CIG with all CIS ready */ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { struct hci_conn *link; if (hci_conn_check_create_cis(conn)) continue; cig = conn->iso_qos.ucast.cig; list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { if (hci_conn_check_create_cis(link) > 0 && link->iso_qos.ucast.cig == cig && link->state != BT_CONNECTED) { cig = BT_ISO_QOS_CIG_UNSET; break; } } if (cig != BT_ISO_QOS_CIG_UNSET) break; } if (cig == BT_ISO_QOS_CIG_UNSET) goto done; list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { struct hci_cis *cis = &cmd->cis[aux_num_cis]; if (hci_conn_check_create_cis(conn) || conn->iso_qos.ucast.cig != cig) continue; set_bit(HCI_CONN_CREATE_CIS, &conn->flags); cis->acl_handle = cpu_to_le16(conn->parent->handle); cis->cis_handle = cpu_to_le16(conn->handle); aux_num_cis++; if (aux_num_cis >= cmd->num_cis) break; } cmd->num_cis = aux_num_cis; done: rcu_read_unlock(); hci_dev_unlock(hdev); if (!aux_num_cis) return 0; /* Wait for HCI_LE_CIS_Established */ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, struct_size(cmd, cis, cmd->num_cis), cmd, HCI_EVT_LE_CIS_ESTABLISHED, conn->conn_timeout, NULL); } int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) { struct hci_cp_le_remove_cig cp; memset(&cp, 0, sizeof(cp)); cp.cig_id = handle; return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle) { struct hci_cp_le_big_term_sync cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle) { struct hci_cp_le_pa_term_sync cp; memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(handle); return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_get_random_address(struct 
hci_dev *hdev, bool require_privacy, bool use_rpa, struct adv_info *adv_instance, u8 *own_addr_type, bdaddr_t *rand_addr) { int err; bacpy(rand_addr, BDADDR_ANY); /* If privacy is enabled use a resolvable private address. If * current RPA has expired then generate a new one. */ if (use_rpa) { /* If Controller supports LL Privacy use own address type is * 0x03 */ if (ll_privacy_capable(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (adv_instance) { if (adv_rpa_valid(adv_instance)) return 0; } else { if (rpa_valid(hdev)) return 0; } err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } bacpy(rand_addr, &hdev->rpa); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for * non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; bacpy(rand_addr, &nrpa); return 0; } /* No privacy so use a public address. */ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } static int _update_adv_data_sync(struct hci_dev *hdev, void *data) { u8 instance = PTR_UINT(data); return hci_update_adv_data_sync(hdev, instance); } int hci_update_adv_data(struct hci_dev *hdev, u8 instance) { return hci_cmd_sync_queue(hdev, _update_adv_data_sync, UINT_PTR(instance), NULL); } static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data) { struct hci_conn *conn = data; struct inquiry_entry *ie; struct hci_cp_create_conn cp; int err; if (!hci_conn_valid(hdev, conn)) return -ECANCELED; /* Many controllers disallow HCI Create Connection while it is doing * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create * Connection. This may cause the MGMT discovering state to become false * without user space's request but it is okay since the MGMT Discovery * APIs do not promise that discovery should be done forever. Instead, * the user space monitors the status of MGMT discovering and it may * request for discovery again when this flag becomes false. 
*/ if (test_bit(HCI_INQUIRY, &hdev->flags)) { err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); if (err) bt_dev_warn(hdev, "Failed to cancel inquiry %d", err); } conn->state = BT_CONNECT; conn->out = true; conn->role = HCI_ROLE_MASTER; conn->attempt++; conn->link_policy = hdev->link_policy; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) { if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { cp.pscan_rep_mode = ie->data.pscan_rep_mode; cp.pscan_mode = ie->data.pscan_mode; cp.clock_offset = ie->data.clock_offset | cpu_to_le16(0x8000); } memcpy(conn->dev_class, ie->data.dev_class, 3); } cp.pkt_type = cpu_to_le16(conn->pkt_type); if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) cp.role_switch = 0x01; else cp.role_switch = 0x00; return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp, HCI_EV_CONN_COMPLETE, conn->conn_timeout, NULL); } int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn) { return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn, NULL); } static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) { struct hci_conn *conn = data; bt_dev_dbg(hdev, "err %d", err); if (err == -ECANCELED) return; hci_dev_lock(hdev); if (!hci_conn_valid(hdev, conn)) goto done; if (!err) { hci_connect_le_scan_cleanup(conn, 0x00); goto done; } /* Check if connection is still pending */ if (conn != hci_lookup_le_connect(hdev)) goto done; /* Flush to make sure we send create conn cancel command if needed */ flush_delayed_work(&conn->le_conn_timeout); hci_conn_failed(conn, bt_status(err)); done: hci_dev_unlock(hdev); } int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn) { return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn, create_le_conn_complete); } int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) { if (conn->state != BT_OPEN) return -EINVAL; switch (conn->type) { case ACL_LINK: return !hci_cmd_sync_dequeue_once(hdev, hci_acl_create_conn_sync, conn, NULL); case LE_LINK: return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, conn, create_le_conn_complete); } return -ENOENT; } int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, struct hci_conn_params *params) { struct hci_cp_le_conn_update cp; memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); cp.conn_latency = cpu_to_le16(params->conn_latency); cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static void create_pa_complete(struct hci_dev *hdev, void *data, int err) { struct hci_conn *conn = data; struct hci_conn *pa_sync; bt_dev_dbg(hdev, "err %d", err); if (err == -ECANCELED) return; hci_dev_lock(hdev); hci_dev_clear_flag(hdev, HCI_PA_SYNC); if (!hci_conn_valid(hdev, conn)) clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); if (!err) goto unlock; /* Add connection to indicate PA sync error */ pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY, HCI_ROLE_SLAVE); if (IS_ERR(pa_sync)) goto unlock; set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); /* Notify iso layer */ hci_connect_cfm(pa_sync, bt_status(err)); unlock: 
hci_dev_unlock(hdev); } static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) { struct hci_cp_le_pa_create_sync cp; struct hci_conn *conn = data; struct bt_iso_qos *qos = &conn->iso_qos; int err; if (!hci_conn_valid(hdev, conn)) return -ECANCELED; if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID) return -EINVAL; if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) return -EBUSY; /* Stop scanning if SID has not been set and active scanning is enabled * so we use passive scanning which will be scanning using the allow * list programmed to contain only the connection address. */ if (conn->sid == HCI_SID_INVALID && hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_scan_disable_sync(hdev); hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can * program the address in the allow list so PA advertisements can be * received. */ set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); hci_update_passive_scan_sync(hdev); /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update * it. */ if (conn->sid == HCI_SID_INVALID) __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, HCI_EV_LE_EXT_ADV_REPORT, conn->conn_timeout, NULL); memset(&cp, 0, sizeof(cp)); cp.options = qos->bcast.options; cp.sid = conn->sid; cp.addr_type = conn->dst_type; bacpy(&cp.addr, &conn->dst); cp.skip = cpu_to_le16(qos->bcast.skip); cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); cp.sync_cte_type = qos->bcast.sync_cte_type; /* The spec allows only one pending LE Periodic Advertising Create * Sync command at a time so we forcefully wait for PA Sync Established * event since cmd_work can only schedule one command at a time. * * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 2493: * * If the Host issues this command when another HCI_LE_Periodic_ * Advertising_Create_Sync command is pending, the Controller shall * return the error code Command Disallowed (0x0C). */ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC, sizeof(cp), &cp, HCI_EV_LE_PA_SYNC_ESTABLISHED, conn->conn_timeout, NULL); if (err == -ETIMEDOUT) __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); return err; } int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn) { return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn, create_pa_complete); } static void create_big_complete(struct hci_dev *hdev, void *data, int err) { struct hci_conn *conn = data; bt_dev_dbg(hdev, "err %d", err); if (err == -ECANCELED) return; if (hci_conn_valid(hdev, conn)) clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); } static int hci_le_big_create_sync(struct hci_dev *hdev, void *data) { DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11); struct hci_conn *conn = data; struct bt_iso_qos *qos = &conn->iso_qos; int err; if (!hci_conn_valid(hdev, conn)) return -ECANCELED; set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); memset(cp, 0, sizeof(*cp)); cp->handle = qos->bcast.big; cp->sync_handle = cpu_to_le16(conn->sync_handle); cp->encryption = qos->bcast.encryption; memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode)); cp->mse = qos->bcast.mse; cp->timeout = cpu_to_le16(qos->bcast.timeout); cp->num_bis = conn->num_bis; memcpy(cp->bis, conn->bis, conn->num_bis); /* The spec allows only one pending LE BIG Create Sync command at * a time, so we forcefully wait for BIG Sync Established event since * cmd_work can only schedule one command at a time. 
* * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 2586: * * If the Host sends this command when the Controller is in the * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ * Established event has not been generated, the Controller shall * return the error code Command Disallowed (0x0C). */ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC, struct_size(cp, bis, cp->num_bis), cp, HCI_EVT_LE_BIG_SYNC_ESTABLISHED, conn->conn_timeout, NULL); if (err == -ETIMEDOUT) hci_le_big_terminate_sync(hdev, cp->handle); return err; } int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn) { return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn, create_big_complete); } |
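The connect helpers above (hci_connect_acl_sync, hci_connect_le_sync, hci_connect_pa_sync, hci_connect_big_sync) all share one shape: queue a work function once on the command-sync machinery and pair it with a completion callback for cleanup. A minimal sketch of that shape, assuming hypothetical my_feature_* names; hci_cmd_sync_queue_once(), hci_conn_valid() and bt_dev_dbg() are the helpers already used in the code above.

/* Hypothetical sketch of the queue-once pattern used by the connect helpers. */
static int my_feature_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	/* The connection may already be gone by the time the work runs. */
	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* ... issue HCI commands with __hci_cmd_sync_status*() here ... */
	return 0;
}

static void my_feature_complete(struct hci_dev *hdev, void *data, int err)
{
	/* Runs after my_feature_sync() finishes or is cancelled. */
	bt_dev_dbg(hdev, "err %d", err);
}

int my_connect_feature_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	/* Queued at most once for this (function, data) pair. */
	return hci_cmd_sync_queue_once(hdev, my_feature_sync, conn,
				       my_feature_complete);
}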
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2008 Intel Corporation * Author: Matthew Wilcox <willy@linux.intel.com> * * Please see kernel/locking/semaphore.c for documentation of these functions */ #ifndef __LINUX_SEMAPHORE_H #define __LINUX_SEMAPHORE_H #include <linux/list.h> #include <linux/spinlock.h> /* Please don't access any members of this structure directly */ struct semaphore { raw_spinlock_t lock; unsigned int count; struct list_head wait_list; #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER unsigned long last_holder; #endif }; #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER #define __LAST_HOLDER_SEMAPHORE_INITIALIZER \ , .last_holder = 0UL #else #define __LAST_HOLDER_SEMAPHORE_INITIALIZER #endif #define __SEMAPHORE_INITIALIZER(name, n) \ { \ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \ .count = n, \ .wait_list = LIST_HEAD_INIT((name).wait_list) \ __LAST_HOLDER_SEMAPHORE_INITIALIZER \ } /* * Unlike mutexes, binary semaphores do not have an owner, so up() can * be called in a different thread from the one which called down(). * It is also safe to call down_trylock() and up() from interrupt * context. */ #define DEFINE_SEMAPHORE(_name, _n) \ struct semaphore _name = __SEMAPHORE_INITIALIZER(_name, _n) static inline void sema_init(struct semaphore *sem, int val) { static struct lock_class_key __key; *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); } extern void down(struct semaphore *sem); extern int __must_check down_interruptible(struct semaphore *sem); extern int __must_check down_killable(struct semaphore *sem); extern int __must_check down_trylock(struct semaphore *sem); extern int __must_check down_timeout(struct semaphore *sem, long jiffies); extern void up(struct semaphore *sem); extern unsigned long sem_last_holder(struct semaphore *sem); #endif /* __LINUX_SEMAPHORE_H */ |
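A minimal usage sketch for the header above, assuming a hypothetical ring_sem protecting a shared ring buffer; it relies only on the documented properties that up() may be called from a different thread than down() and that down_trylock()/up() are safe in interrupt context.

#include <linux/errno.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(ring_sem, 1);		/* binary semaphore, initially free */

/* May run in hard-irq context: only trylock/up are used here. */
static void producer_irq_path(void)
{
	if (down_trylock(&ring_sem) == 0) {	/* 0 means acquired */
		/* ... fill the shared ring ... */
		up(&ring_sem);
	}
}

/* Process context: may sleep in down_interruptible(). */
static int consumer_thread(void *unused)
{
	if (down_interruptible(&ring_sem))
		return -EINTR;
	/* ... drain the ring; up() need not come from the thread that called down() ... */
	up(&ring_sem);
	return 0;
}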
308 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 | // SPDX-License-Identifier: GPL-2.0-only /* * Support for polling mode for input devices. */ #include <linux/device.h> #include <linux/input.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/workqueue.h> #include "input-poller.h" struct input_dev_poller { void (*poll)(struct input_dev *dev); unsigned int poll_interval; /* msec */ unsigned int poll_interval_max; /* msec */ unsigned int poll_interval_min; /* msec */ struct input_dev *input; struct delayed_work work; }; static void input_dev_poller_queue_work(struct input_dev_poller *poller) { unsigned long delay; delay = msecs_to_jiffies(poller->poll_interval); if (delay >= HZ) delay = round_jiffies_relative(delay); queue_delayed_work(system_freezable_wq, &poller->work, delay); } static void input_dev_poller_work(struct work_struct *work) { struct input_dev_poller *poller = container_of(work, struct input_dev_poller, work.work); poller->poll(poller->input); input_dev_poller_queue_work(poller); } void input_dev_poller_finalize(struct input_dev_poller *poller) { if (!poller->poll_interval) poller->poll_interval = 500; if (!poller->poll_interval_max) poller->poll_interval_max = poller->poll_interval; } void input_dev_poller_start(struct input_dev_poller *poller) { /* Only start polling if polling is enabled */ if (poller->poll_interval > 0) { poller->poll(poller->input); input_dev_poller_queue_work(poller); } } void input_dev_poller_stop(struct input_dev_poller *poller) { cancel_delayed_work_sync(&poller->work); } int input_setup_polling(struct input_dev *dev, void (*poll_fn)(struct input_dev *dev)) { struct input_dev_poller *poller; poller = kzalloc(sizeof(*poller), GFP_KERNEL); if (!poller) { /* * We want to show message even though kzalloc() may have * printed backtrace as knowing what instance of input * device we were dealing with is helpful. 
*/ dev_err(dev->dev.parent ?: &dev->dev, "%s: unable to allocate poller structure\n", __func__); return -ENOMEM; } INIT_DELAYED_WORK(&poller->work, input_dev_poller_work); poller->input = dev; poller->poll = poll_fn; dev->poller = poller; return 0; } EXPORT_SYMBOL(input_setup_polling); static bool input_dev_ensure_poller(struct input_dev *dev) { if (!dev->poller) { dev_err(dev->dev.parent ?: &dev->dev, "poller structure has not been set up\n"); return false; } return true; } void input_set_poll_interval(struct input_dev *dev, unsigned int interval) { if (input_dev_ensure_poller(dev)) dev->poller->poll_interval = interval; } EXPORT_SYMBOL(input_set_poll_interval); void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval) { if (input_dev_ensure_poller(dev)) dev->poller->poll_interval_min = interval; } EXPORT_SYMBOL(input_set_min_poll_interval); void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval) { if (input_dev_ensure_poller(dev)) dev->poller->poll_interval_max = interval; } EXPORT_SYMBOL(input_set_max_poll_interval); int input_get_poll_interval(struct input_dev *dev) { if (!dev->poller) return -EINVAL; return dev->poller->poll_interval; } EXPORT_SYMBOL(input_get_poll_interval); /* SYSFS interface */ static ssize_t input_dev_get_poll_interval(struct device *dev, struct device_attribute *attr, char *buf) { struct input_dev *input = to_input_dev(dev); return sprintf(buf, "%d\n", input->poller->poll_interval); } static ssize_t input_dev_set_poll_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct input_dev *input = to_input_dev(dev); struct input_dev_poller *poller = input->poller; unsigned int interval; int err; err = kstrtouint(buf, 0, &interval); if (err) return err; if (interval < poller->poll_interval_min) return -EINVAL; if (interval > poller->poll_interval_max) return -EINVAL; guard(mutex)(&input->mutex); poller->poll_interval = interval; if (input_device_enabled(input)) { cancel_delayed_work_sync(&poller->work); if (poller->poll_interval > 0) input_dev_poller_queue_work(poller); } return count; } static DEVICE_ATTR(poll, 0644, input_dev_get_poll_interval, input_dev_set_poll_interval); static ssize_t input_dev_get_poll_max(struct device *dev, struct device_attribute *attr, char *buf) { struct input_dev *input = to_input_dev(dev); return sprintf(buf, "%d\n", input->poller->poll_interval_max); } static DEVICE_ATTR(max, 0444, input_dev_get_poll_max, NULL); static ssize_t input_dev_get_poll_min(struct device *dev, struct device_attribute *attr, char *buf) { struct input_dev *input = to_input_dev(dev); return sprintf(buf, "%d\n", input->poller->poll_interval_min); } static DEVICE_ATTR(min, 0444, input_dev_get_poll_min, NULL); static umode_t input_poller_attrs_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct input_dev *input = to_input_dev(dev); return input->poller ? attr->mode : 0; } static struct attribute *input_poller_attrs[] = { &dev_attr_poll.attr, &dev_attr_max.attr, &dev_attr_min.attr, NULL }; struct attribute_group input_poller_attribute_group = { .is_visible = input_poller_attrs_visible, .attrs = input_poller_attrs, }; |
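A hedged driver-side sketch of how the polling helpers above are typically wired up from a probe path; my_button_* and the interval values are illustrative, and the surrounding input/platform-device calls are standard input-core API rather than anything defined in this file.

#include <linux/input.h>
#include <linux/platform_device.h>

static void my_button_poll(struct input_dev *input)
{
	bool pressed = false;		/* read the hardware state here */

	input_report_key(input, KEY_POWER, pressed);
	input_sync(input);
}

static int my_button_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int error;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	input_set_capability(input, EV_KEY, KEY_POWER);

	error = input_setup_polling(input, my_button_poll);
	if (error)
		return error;

	/* Bounds exposed through the poll/min/max sysfs attributes above. */
	input_set_min_poll_interval(input, 10);
	input_set_max_poll_interval(input, 1000);
	input_set_poll_interval(input, 50);

	return input_register_device(input);
}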
// SPDX-License-Identifier: GPL-2.0 /* * Wireless utility functions * * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2018-2023, 2025 Intel Corporation */ #include <linux/export.h> #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> #include <net/ip.h> #include <net/dsfield.h> #include
<linux/if_vlan.h> #include <linux/mpls.h> #include <linux/gcd.h> #include <linux/bitfield.h> #include <linux/nospec.h> #include "core.h" #include "rdev-ops.h" const struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate) { struct ieee80211_rate *result = &sband->bitrates[0]; int i; for (i = 0; i < sband->n_bitrates; i++) { if (!(basic_rates & BIT(i))) continue; if (sband->bitrates[i].bitrate > bitrate) continue; result = &sband->bitrates[i]; } return result; } EXPORT_SYMBOL(ieee80211_get_response_rate); u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband) { struct ieee80211_rate *bitrates; u32 mandatory_rates = 0; enum ieee80211_rate_flags mandatory_flag; int i; if (WARN_ON(!sband)) return 1; if (sband->band == NL80211_BAND_2GHZ) mandatory_flag = IEEE80211_RATE_MANDATORY_B; else mandatory_flag = IEEE80211_RATE_MANDATORY_A; bitrates = sband->bitrates; for (i = 0; i < sband->n_bitrates; i++) if (bitrates[i].flags & mandatory_flag) mandatory_rates |= BIT(i); return mandatory_rates; } EXPORT_SYMBOL(ieee80211_mandatory_rates); u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band) { /* see 802.11 17.3.8.3.2 and Annex J * there are overlapping channel numbers in 5GHz and 2GHz bands */ if (chan <= 0) return 0; /* not supported */ switch (band) { case NL80211_BAND_2GHZ: case NL80211_BAND_LC: if (chan == 14) return MHZ_TO_KHZ(2484); else if (chan < 14) return MHZ_TO_KHZ(2407 + chan * 5); break; case NL80211_BAND_5GHZ: if (chan >= 182 && chan <= 196) return MHZ_TO_KHZ(4000 + chan * 5); else return MHZ_TO_KHZ(5000 + chan * 5); break; case NL80211_BAND_6GHZ: /* see 802.11ax D6.1 27.3.23.2 */ if (chan == 2) return MHZ_TO_KHZ(5935); if (chan <= 233) return MHZ_TO_KHZ(5950 + chan * 5); break; case NL80211_BAND_60GHZ: if (chan < 7) return MHZ_TO_KHZ(56160 + chan * 2160); break; case NL80211_BAND_S1GHZ: return 902000 + chan * 500; default: ; } return 0; /* not supported */ } EXPORT_SYMBOL(ieee80211_channel_to_freq_khz); enum nl80211_chan_width ieee80211_s1g_channel_width(const struct ieee80211_channel *chan) { if (WARN_ON(!chan || chan->band != NL80211_BAND_S1GHZ)) return NL80211_CHAN_WIDTH_20_NOHT; /*S1G defines a single allowed channel width per channel. * Extract that width here. 
*/ if (chan->flags & IEEE80211_CHAN_1MHZ) return NL80211_CHAN_WIDTH_1; else if (chan->flags & IEEE80211_CHAN_2MHZ) return NL80211_CHAN_WIDTH_2; else if (chan->flags & IEEE80211_CHAN_4MHZ) return NL80211_CHAN_WIDTH_4; else if (chan->flags & IEEE80211_CHAN_8MHZ) return NL80211_CHAN_WIDTH_8; else if (chan->flags & IEEE80211_CHAN_16MHZ) return NL80211_CHAN_WIDTH_16; pr_err("unknown channel width for channel at %dKHz?\n", ieee80211_channel_to_khz(chan)); return NL80211_CHAN_WIDTH_1; } EXPORT_SYMBOL(ieee80211_s1g_channel_width); int ieee80211_freq_khz_to_channel(u32 freq) { /* TODO: just handle MHz for now */ freq = KHZ_TO_MHZ(freq); /* see 802.11 17.3.8.3.2 and Annex J */ if (freq == 2484) return 14; else if (freq < 2484) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; else if (freq < 5925) return (freq - 5000) / 5; else if (freq == 5935) return 2; else if (freq <= 45000) /* DMG band lower limit */ /* see 802.11ax D6.1 27.3.22.2 */ return (freq - 5950) / 5; else if (freq >= 58320 && freq <= 70200) return (freq - 56160) / 2160; else return 0; } EXPORT_SYMBOL(ieee80211_freq_khz_to_channel); struct ieee80211_channel *ieee80211_get_channel_khz(struct wiphy *wiphy, u32 freq) { enum nl80211_band band; struct ieee80211_supported_band *sband; int i; for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *chan = &sband->channels[i]; if (ieee80211_channel_to_khz(chan) == freq) return chan; } } return NULL; } EXPORT_SYMBOL(ieee80211_get_channel_khz); static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) { int i, want; switch (sband->band) { case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: want = 3; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 60 || sband->bitrates[i].bitrate == 120 || sband->bitrates[i].bitrate == 240) { sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_A; want--; } } WARN_ON(want); break; case NL80211_BAND_2GHZ: case NL80211_BAND_LC: want = 7; for (i = 0; i < sband->n_bitrates; i++) { switch (sband->bitrates[i].bitrate) { case 10: case 20: case 55: case 110: sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_B | IEEE80211_RATE_MANDATORY_G; want--; break; case 60: case 120: case 240: sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_G; want--; fallthrough; default: sband->bitrates[i].flags |= IEEE80211_RATE_ERP_G; break; } } WARN_ON(want != 0 && want != 3); break; case NL80211_BAND_60GHZ: /* check for mandatory HT MCS 1..4 */ WARN_ON(!sband->ht_cap.ht_supported); WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e); break; case NL80211_BAND_S1GHZ: /* Figure 9-589bd: 3 means unsupported, so != 3 means at least * mandatory is ok. 
*/ WARN_ON((sband->s1g_cap.nss_mcs[0] & 0x3) == 0x3); break; case NUM_NL80211_BANDS: default: WARN_ON(1); break; } } void ieee80211_set_bitrate_flags(struct wiphy *wiphy) { enum nl80211_band band; for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) set_mandatory_flags_band(wiphy->bands[band]); } bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher) { int i; for (i = 0; i < wiphy->n_cipher_suites; i++) if (cipher == wiphy->cipher_suites[i]) return true; return false; } static bool cfg80211_igtk_cipher_supported(struct cfg80211_registered_device *rdev) { struct wiphy *wiphy = &rdev->wiphy; int i; for (i = 0; i < wiphy->n_cipher_suites; i++) { switch (wiphy->cipher_suites[i]) { case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: return true; } } return false; } bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev, int key_idx, bool pairwise) { int max_key_idx; if (pairwise) max_key_idx = 3; else if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION) || wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) max_key_idx = 7; else if (cfg80211_igtk_cipher_supported(rdev)) max_key_idx = 5; else max_key_idx = 3; if (key_idx < 0 || key_idx > max_key_idx) return false; return true; } int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr) { if (!cfg80211_valid_key_idx(rdev, key_idx, pairwise)) return -EINVAL; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) return -EINVAL; if (pairwise && !mac_addr) return -EINVAL; switch (params->cipher) { case WLAN_CIPHER_SUITE_TKIP: /* Extended Key ID can only be used with CCMP/GCMP ciphers */ if ((pairwise && key_idx) || params->mode != NL80211_KEY_RX_TX) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* IEEE802.11-2016 allows only 0 and - when supporting * Extended Key ID - 1 as index for pairwise keys. * @NL80211_KEY_NO_TX is only allowed for pairwise keys when * the driver supports Extended Key ID. * @NL80211_KEY_SET_TX can't be set when installing and * validating a key. 
*/ if ((params->mode == NL80211_KEY_NO_TX && !pairwise) || params->mode == NL80211_KEY_SET_TX) return -EINVAL; if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID)) { if (pairwise && (key_idx < 0 || key_idx > 1)) return -EINVAL; } else if (pairwise && key_idx) { return -EINVAL; } break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: /* Disallow BIP (group-only) cipher as pairwise cipher */ if (pairwise) return -EINVAL; if (key_idx < 4) return -EINVAL; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (key_idx > 3) return -EINVAL; break; default: break; } switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: if (params->key_len != WLAN_KEY_LEN_WEP40) return -EINVAL; break; case WLAN_CIPHER_SUITE_TKIP: if (params->key_len != WLAN_KEY_LEN_TKIP) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP: if (params->key_len != WLAN_KEY_LEN_CCMP) return -EINVAL; break; case WLAN_CIPHER_SUITE_CCMP_256: if (params->key_len != WLAN_KEY_LEN_CCMP_256) return -EINVAL; break; case WLAN_CIPHER_SUITE_GCMP: if (params->key_len != WLAN_KEY_LEN_GCMP) return -EINVAL; break; case WLAN_CIPHER_SUITE_GCMP_256: if (params->key_len != WLAN_KEY_LEN_GCMP_256) return -EINVAL; break; case WLAN_CIPHER_SUITE_WEP104: if (params->key_len != WLAN_KEY_LEN_WEP104) return -EINVAL; break; case WLAN_CIPHER_SUITE_AES_CMAC: if (params->key_len != WLAN_KEY_LEN_AES_CMAC) return -EINVAL; break; case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (params->key_len != WLAN_KEY_LEN_BIP_CMAC_256) return -EINVAL; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_128) return -EINVAL; break; case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (params->key_len != WLAN_KEY_LEN_BIP_GMAC_256) return -EINVAL; break; default: /* * We don't know anything about this algorithm, * allow using it -- but the driver must check * all parameters! We still check below whether * or not the driver supports this algorithm, * of course. */ break; } if (params->seq) { switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* These ciphers do not use key sequence */ return -EINVAL; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (params->seq_len != 6) return -EINVAL; break; } } if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher)) return -EINVAL; return 0; } unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) { unsigned int hdrlen = 24; if (ieee80211_is_ext(fc)) { hdrlen = 4; goto out; } if (ieee80211_is_data(fc)) { if (ieee80211_has_a4(fc)) hdrlen = 30; if (ieee80211_is_data_qos(fc)) { hdrlen += IEEE80211_QOS_CTL_LEN; if (ieee80211_has_order(fc)) hdrlen += IEEE80211_HT_CTL_LEN; } goto out; } if (ieee80211_is_mgmt(fc)) { if (ieee80211_has_order(fc)) hdrlen += IEEE80211_HT_CTL_LEN; goto out; } if (ieee80211_is_ctl(fc)) { /* * ACK and CTS are 10 bytes, all others 16. 
To see how * to get this condition consider * subtype mask: 0b0000000011110000 (0x00F0) * ACK subtype: 0b0000000011010000 (0x00D0) * CTS subtype: 0b0000000011000000 (0x00C0) * bits that matter: ^^^ (0x00E0) * value of those: 0b0000000011000000 (0x00C0) */ if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0)) hdrlen = 10; else hdrlen = 16; } out: return hdrlen; } EXPORT_SYMBOL(ieee80211_hdrlen); unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) { const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data; unsigned int hdrlen; if (unlikely(skb->len < 10)) return 0; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (unlikely(hdrlen > skb->len)) return 0; return hdrlen; } EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); static unsigned int __ieee80211_get_mesh_hdrlen(u8 flags) { int ae = flags & MESH_FLAGS_AE; /* 802.11-2012, 8.2.4.7.3 */ switch (ae) { default: case 0: return 6; case MESH_FLAGS_AE_A4: return 12; case MESH_FLAGS_AE_A5_A6: return 18; } } unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) { return __ieee80211_get_mesh_hdrlen(meshhdr->flags); } EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto) { const __be16 *hdr_proto = hdr + ETH_ALEN; if (!(ether_addr_equal(hdr, rfc1042_header) && *hdr_proto != htons(ETH_P_AARP) && *hdr_proto != htons(ETH_P_IPX)) && !ether_addr_equal(hdr, bridge_tunnel_header)) return false; *proto = *hdr_proto; return true; } EXPORT_SYMBOL(ieee80211_get_8023_tunnel_proto); int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb) { const void *mesh_addr; struct { struct ethhdr eth; u8 flags; } payload; int hdrlen; int ret; ret = skb_copy_bits(skb, 0, &payload, sizeof(payload)); if (ret) return ret; hdrlen = sizeof(payload.eth) + __ieee80211_get_mesh_hdrlen(payload.flags); if (likely(pskb_may_pull(skb, hdrlen + 8) && ieee80211_get_8023_tunnel_proto(skb->data + hdrlen, &payload.eth.h_proto))) hdrlen += ETH_ALEN + 2; else if (!pskb_may_pull(skb, hdrlen)) return -EINVAL; else payload.eth.h_proto = htons(skb->len - hdrlen); mesh_addr = skb->data + sizeof(payload.eth) + ETH_ALEN; switch (payload.flags & MESH_FLAGS_AE) { case MESH_FLAGS_AE_A4: memcpy(&payload.eth.h_source, mesh_addr, ETH_ALEN); break; case MESH_FLAGS_AE_A5_A6: memcpy(&payload.eth, mesh_addr, 2 * ETH_ALEN); break; default: break; } pskb_pull(skb, hdrlen - sizeof(payload.eth)); memcpy(skb->data, &payload.eth, sizeof(payload.eth)); return 0; } EXPORT_SYMBOL(ieee80211_strip_8023_mesh_hdr); int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, const u8 *addr, enum nl80211_iftype iftype, u8 data_offset, bool is_amsdu) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct { u8 hdr[ETH_ALEN] __aligned(2); __be16 proto; } payload; struct ethhdr tmp; u16 hdrlen; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control) + data_offset; if (skb->len < hdrlen) return -1; /* convert IEEE 802.11 header + possible LLC headers into Ethernet * header * IEEE 802.11 address fields: * ToDS FromDS Addr1 Addr2 Addr3 Addr4 * 0 0 DA SA BSSID n/a * 0 1 DA BSSID SA n/a * 1 0 BSSID SA DA n/a * 1 1 RA TA DA SA */ memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN); switch (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { case cpu_to_le16(IEEE80211_FCTL_TODS): if (unlikely(iftype != NL80211_IFTYPE_AP && iftype != NL80211_IFTYPE_AP_VLAN 
&& iftype != NL80211_IFTYPE_P2P_GO)) return -1; break; case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): if (unlikely(iftype != NL80211_IFTYPE_MESH_POINT && iftype != NL80211_IFTYPE_AP_VLAN && iftype != NL80211_IFTYPE_STATION)) return -1; break; case cpu_to_le16(IEEE80211_FCTL_FROMDS): if ((iftype != NL80211_IFTYPE_STATION && iftype != NL80211_IFTYPE_P2P_CLIENT && iftype != NL80211_IFTYPE_MESH_POINT) || (is_multicast_ether_addr(tmp.h_dest) && ether_addr_equal(tmp.h_source, addr))) return -1; break; case cpu_to_le16(0): if (iftype != NL80211_IFTYPE_ADHOC && iftype != NL80211_IFTYPE_STATION && iftype != NL80211_IFTYPE_OCB) return -1; break; } if (likely(!is_amsdu && iftype != NL80211_IFTYPE_MESH_POINT && skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 && ieee80211_get_8023_tunnel_proto(&payload, &tmp.h_proto))) { /* remove RFC1042 or Bridge-Tunnel encapsulation */ hdrlen += ETH_ALEN + 2; skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2); } else { tmp.h_proto = htons(skb->len - hdrlen); } pskb_pull(skb, hdrlen); if (!ehdr) ehdr = skb_push(skb, sizeof(struct ethhdr)); memcpy(ehdr, &tmp, sizeof(tmp)); return 0; } EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr); static void __frame_add_frag(struct sk_buff *skb, struct page *page, void *ptr, int len, int size) { struct skb_shared_info *sh = skb_shinfo(skb); int page_offset; get_page(page); page_offset = ptr - page_address(page); skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size); } static void __ieee80211_amsdu_copy_frag(struct sk_buff *skb, struct sk_buff *frame, int offset, int len) { struct skb_shared_info *sh = skb_shinfo(skb); const skb_frag_t *frag = &sh->frags[0]; struct page *frag_page; void *frag_ptr; int frag_len, frag_size; int head_size = skb->len - skb->data_len; int cur_len; frag_page = virt_to_head_page(skb->head); frag_ptr = skb->data; frag_size = head_size; while (offset >= frag_size) { offset -= frag_size; frag_page = skb_frag_page(frag); frag_ptr = skb_frag_address(frag); frag_size = skb_frag_size(frag); frag++; } frag_ptr += offset; frag_len = frag_size - offset; cur_len = min(len, frag_len); __frame_add_frag(frame, frag_page, frag_ptr, cur_len, frag_size); len -= cur_len; while (len > 0) { frag_len = skb_frag_size(frag); cur_len = min(len, frag_len); __frame_add_frag(frame, skb_frag_page(frag), skb_frag_address(frag), cur_len, frag_len); len -= cur_len; frag++; } } static struct sk_buff * __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen, int offset, int len, bool reuse_frag, int min_len) { struct sk_buff *frame; int cur_len = len; if (skb->len - offset < len) return NULL; /* * When reusing fragments, copy some data to the head to simplify * ethernet header handling and speed up protocol header processing * in the stack later. */ if (reuse_frag) cur_len = min_t(int, len, min_len); /* * Allocate and reserve two bytes more for payload * alignment since sizeof(struct ethhdr) is 14. 
*/ frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len); if (!frame) return NULL; frame->priority = skb->priority; skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2); skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len); len -= cur_len; if (!len) return frame; offset += cur_len; __ieee80211_amsdu_copy_frag(skb, frame, offset, len); return frame; } static u16 ieee80211_amsdu_subframe_length(void *field, u8 mesh_flags, u8 hdr_type) { __le16 *field_le = field; __be16 *field_be = field; u16 len; if (hdr_type >= 2) len = le16_to_cpu(*field_le); else len = be16_to_cpu(*field_be); if (hdr_type) len += __ieee80211_get_mesh_hdrlen(mesh_flags); return len; } bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr) { int offset = 0, subframe_len, padding; for (offset = 0; offset < skb->len; offset += subframe_len + padding) { int remaining = skb->len - offset; struct { __be16 len; u8 mesh_flags; } hdr; u16 len; if (sizeof(hdr) > remaining) return false; if (skb_copy_bits(skb, offset + 2 * ETH_ALEN, &hdr, sizeof(hdr)) < 0) return false; len = ieee80211_amsdu_subframe_length(&hdr.len, hdr.mesh_flags, mesh_hdr); subframe_len = sizeof(struct ethhdr) + len; padding = (4 - subframe_len) & 0x3; if (subframe_len > remaining) return false; } return true; } EXPORT_SYMBOL(ieee80211_is_valid_amsdu); void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, const u8 *check_da, const u8 *check_sa, u8 mesh_control) { unsigned int hlen = ALIGN(extra_headroom, 4); struct sk_buff *frame = NULL; int offset = 0; struct { struct ethhdr eth; uint8_t flags; } hdr; bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb); bool reuse_skb = false; bool last = false; int copy_len = sizeof(hdr.eth); if (iftype == NL80211_IFTYPE_MESH_POINT) copy_len = sizeof(hdr); while (!last) { int remaining = skb->len - offset; unsigned int subframe_len; int len, mesh_len = 0; u8 padding; if (copy_len > remaining) goto purge; skb_copy_bits(skb, offset, &hdr, copy_len); if (iftype == NL80211_IFTYPE_MESH_POINT) mesh_len = __ieee80211_get_mesh_hdrlen(hdr.flags); len = ieee80211_amsdu_subframe_length(&hdr.eth.h_proto, hdr.flags, mesh_control); subframe_len = sizeof(struct ethhdr) + len; padding = (4 - subframe_len) & 0x3; /* the last MSDU has no padding */ if (subframe_len > remaining) goto purge; /* mitigate A-MSDU aggregation injection attacks */ if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header)) goto purge; offset += sizeof(struct ethhdr); last = remaining <= subframe_len + padding; /* FIXME: should we really accept multicast DA? 
*/ if ((check_da && !is_multicast_ether_addr(hdr.eth.h_dest) && !ether_addr_equal(check_da, hdr.eth.h_dest)) || (check_sa && !ether_addr_equal(check_sa, hdr.eth.h_source))) { offset += len + padding; continue; } /* reuse skb for the last subframe */ if (!skb_is_nonlinear(skb) && !reuse_frag && last) { skb_pull(skb, offset); frame = skb; reuse_skb = true; } else { frame = __ieee80211_amsdu_copy(skb, hlen, offset, len, reuse_frag, 32 + mesh_len); if (!frame) goto purge; offset += len + padding; } skb_reset_network_header(frame); frame->dev = skb->dev; frame->priority = skb->priority; if (likely(iftype != NL80211_IFTYPE_MESH_POINT && ieee80211_get_8023_tunnel_proto(frame->data, &hdr.eth.h_proto))) skb_pull(frame, ETH_ALEN + 2); memcpy(skb_push(frame, sizeof(hdr.eth)), &hdr.eth, sizeof(hdr.eth)); __skb_queue_tail(list, frame); } if (!reuse_skb) dev_kfree_skb(skb); return; purge: __skb_queue_purge(list); dev_kfree_skb(skb); } EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); /* Given a data frame determine the 802.1p/1d tag to use. */ unsigned int cfg80211_classify8021d(struct sk_buff *skb, struct cfg80211_qos_map *qos_map) { unsigned int dscp; unsigned char vlan_priority; unsigned int ret; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc. */ if (skb->priority >= 256 && skb->priority <= 263) { ret = skb->priority - 256; goto out; } if (skb_vlan_tag_present(skb)) { vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; if (vlan_priority > 0) { ret = vlan_priority; goto out; } } switch (skb->protocol) { case htons(ETH_P_IP): dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc; break; case htons(ETH_P_IPV6): dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc; break; case htons(ETH_P_MPLS_UC): case htons(ETH_P_MPLS_MC): { struct mpls_label mpls_tmp, *mpls; mpls = skb_header_pointer(skb, sizeof(struct ethhdr), sizeof(*mpls), &mpls_tmp); if (!mpls) return 0; ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; goto out; } case htons(ETH_P_80221): /* 802.21 is always network control traffic */ return 7; default: return 0; } if (qos_map) { unsigned int i, tmp_dscp = dscp >> 2; for (i = 0; i < qos_map->num_des; i++) { if (tmp_dscp == qos_map->dscp_exception[i].dscp) { ret = qos_map->dscp_exception[i].up; goto out; } } for (i = 0; i < 8; i++) { if (tmp_dscp >= qos_map->up[i].low && tmp_dscp <= qos_map->up[i].high) { ret = i; goto out; } } } /* The default mapping as defined Section 2.3 in RFC8325: The three * Most Significant Bits (MSBs) of the DSCP are used as the * corresponding L2 markings. */ ret = dscp >> 5; /* Handle specific DSCP values for which the default mapping (as * described above) doesn't adhere to the intended usage of the DSCP * value. See section 4 in RFC8325. 
Specifically, for the following * Diffserv Service Classes no update is needed: * - Standard: DF * - Low Priority Data: CS1 * - Multimedia Conferencing: AF41, AF42, AF43 * - Network Control Traffic: CS7 * - Real-Time Interactive: CS4 * - Signaling: CS5 */ switch (dscp >> 2) { case 10: case 12: case 14: /* High throughput data: AF11, AF12, AF13 */ ret = 0; break; case 16: /* Operations, Administration, and Maintenance and Provisioning: * CS2 */ ret = 0; break; case 18: case 20: case 22: /* Low latency data: AF21, AF22, AF23 */ ret = 3; break; case 24: /* Broadcasting video: CS3 */ ret = 4; break; case 26: case 28: case 30: /* Multimedia Streaming: AF31, AF32, AF33 */ ret = 4; break; case 44: /* Voice Admit: VA */ ret = 6; break; case 46: /* Telephony traffic: EF */ ret = 6; break; case 48: /* Network Control Traffic: CS6 */ ret = 7; break; } out: return array_index_nospec(ret, IEEE80211_NUM_TIDS); } EXPORT_SYMBOL(cfg80211_classify8021d); const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id) { const struct cfg80211_bss_ies *ies; ies = rcu_dereference(bss->ies); if (!ies) return NULL; return cfg80211_find_elem(id, ies->data, ies->len); } EXPORT_SYMBOL(ieee80211_bss_get_elem); void cfg80211_upload_connect_keys(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct net_device *dev = wdev->netdev; int i; if (!wdev->connect_keys) return; for (i = 0; i < 4; i++) { if (!wdev->connect_keys->params[i].cipher) continue; if (rdev_add_key(rdev, dev, -1, i, false, NULL, &wdev->connect_keys->params[i])) { netdev_err(dev, "failed to set key %d\n", i); continue; } if (wdev->connect_keys->def == i && rdev_set_default_key(rdev, dev, -1, i, true, true)) { netdev_err(dev, "failed to set defkey %d\n", i); continue; } } kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; } void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; spin_lock_irqsave(&wdev->event_lock, flags); while (!list_empty(&wdev->event_list)) { ev = list_first_entry(&wdev->event_list, struct cfg80211_event, list); list_del(&ev->list); spin_unlock_irqrestore(&wdev->event_lock, flags); switch (ev->type) { case EVENT_CONNECT_RESULT: __cfg80211_connect_result( wdev->netdev, &ev->cr, ev->cr.status == WLAN_STATUS_SUCCESS); break; case EVENT_ROAMED: __cfg80211_roamed(wdev, &ev->rm); break; case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, ev->dc.ie, ev->dc.ie_len, ev->dc.reason, !ev->dc.locally_generated); break; case EVENT_IBSS_JOINED: __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid, ev->ij.channel); break; case EVENT_STOPPED: cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev); break; case EVENT_PORT_AUTHORIZED: __cfg80211_port_authorized(wdev, ev->pa.peer_addr, ev->pa.td_bitmap, ev->pa.td_bitmap_len); break; } kfree(ev); spin_lock_irqsave(&wdev->event_lock, flags); } spin_unlock_irqrestore(&wdev->event_lock, flags); } void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; lockdep_assert_held(&rdev->wiphy.mtx); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_process_wdev_events(wdev); } int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, struct vif_params *params) { int err; enum nl80211_iftype otype = dev->ieee80211_ptr->iftype; lockdep_assert_held(&rdev->wiphy.mtx); /* don't support changing VLANs, you just re-create them */ if (otype == NL80211_IFTYPE_AP_VLAN) return 
-EOPNOTSUPP; /* cannot change into P2P device or NAN */ if (ntype == NL80211_IFTYPE_P2P_DEVICE || ntype == NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!rdev->ops->change_virtual_intf || !(rdev->wiphy.interface_modes & (1 << ntype))) return -EOPNOTSUPP; if (ntype != otype) { /* if it's part of a bridge, reject changing type to station/ibss */ if (netif_is_bridge_port(dev) && (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION || ntype == NL80211_IFTYPE_P2P_CLIENT)) return -EBUSY; dev->ieee80211_ptr->use_4addr = false; rdev_set_qos_map(rdev, dev, NULL); switch (otype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, dev, -1, true); break; case NL80211_IFTYPE_ADHOC: cfg80211_leave_ibss(rdev, dev, false); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); break; case NL80211_IFTYPE_MESH_POINT: /* mesh should be handled? */ break; case NL80211_IFTYPE_OCB: cfg80211_leave_ocb(rdev, dev); break; default: break; } cfg80211_process_rdev_events(rdev); cfg80211_mlme_purge_registrations(dev->ieee80211_ptr); memset(&dev->ieee80211_ptr->u, 0, sizeof(dev->ieee80211_ptr->u)); memset(&dev->ieee80211_ptr->links, 0, sizeof(dev->ieee80211_ptr->links)); } err = rdev_change_virtual_intf(rdev, dev, ntype, params); WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); if (!err && params && params->use_4addr != -1) dev->ieee80211_ptr->use_4addr = params->use_4addr; if (!err) { dev->priv_flags &= ~IFF_DONT_BRIDGE; switch (ntype) { case NL80211_IFTYPE_STATION: if (dev->ieee80211_ptr->use_4addr) break; fallthrough; case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: dev->priv_flags |= IFF_DONT_BRIDGE; break; case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: /* bridging OK */ break; case NL80211_IFTYPE_MONITOR: /* monitor can't bridge anyway */ break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: /* not happening */ break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_NAN: WARN_ON(1); break; } } if (!err && ntype != otype && netif_running(dev)) { cfg80211_update_iface_num(rdev, ntype, 1); cfg80211_update_iface_num(rdev, otype, -1); } return err; } static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate) { int modulation, streams, bitrate; /* the formula below does only work for MCS values smaller than 32 */ if (WARN_ON_ONCE(rate->mcs >= 32)) return 0; modulation = rate->mcs & 7; streams = (rate->mcs >> 3) + 1; bitrate = (rate->bw == RATE_INFO_BW_40) ? 
13500000 : 6500000; if (modulation < 4) bitrate *= (modulation + 1); else if (modulation == 4) bitrate *= (modulation + 2); else bitrate *= (modulation + 3); bitrate *= streams; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; } static u32 cfg80211_calculate_bitrate_dmg(struct rate_info *rate) { static const u32 __mcs2bitrate[] = { /* control PHY */ [0] = 275, /* SC PHY */ [1] = 3850, [2] = 7700, [3] = 9625, [4] = 11550, [5] = 12512, /* 1251.25 mbps */ [6] = 15400, [7] = 19250, [8] = 23100, [9] = 25025, [10] = 30800, [11] = 38500, [12] = 46200, /* OFDM PHY */ [13] = 6930, [14] = 8662, /* 866.25 mbps */ [15] = 13860, [16] = 17325, [17] = 20790, [18] = 27720, [19] = 34650, [20] = 41580, [21] = 45045, [22] = 51975, [23] = 62370, [24] = 67568, /* 6756.75 mbps */ /* LP-SC PHY */ [25] = 6260, [26] = 8340, [27] = 11120, [28] = 12510, [29] = 16680, [30] = 22240, [31] = 25030, }; if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate))) return 0; return __mcs2bitrate[rate->mcs]; } static u32 cfg80211_calculate_bitrate_extended_sc_dmg(struct rate_info *rate) { static const u32 __mcs2bitrate[] = { [6 - 6] = 26950, /* MCS 9.1 : 2695.0 mbps */ [7 - 6] = 50050, /* MCS 12.1 */ [8 - 6] = 53900, [9 - 6] = 57750, [10 - 6] = 63900, [11 - 6] = 75075, [12 - 6] = 80850, }; /* Extended SC MCS not defined for base MCS below 6 or above 12 */ if (WARN_ON_ONCE(rate->mcs < 6 || rate->mcs > 12)) return 0; return __mcs2bitrate[rate->mcs - 6]; } static u32 cfg80211_calculate_bitrate_edmg(struct rate_info *rate) { static const u32 __mcs2bitrate[] = { /* control PHY */ [0] = 275, /* SC PHY */ [1] = 3850, [2] = 7700, [3] = 9625, [4] = 11550, [5] = 12512, /* 1251.25 mbps */ [6] = 13475, [7] = 15400, [8] = 19250, [9] = 23100, [10] = 25025, [11] = 26950, [12] = 30800, [13] = 38500, [14] = 46200, [15] = 50050, [16] = 53900, [17] = 57750, [18] = 69300, [19] = 75075, [20] = 80850, }; if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate))) return 0; return __mcs2bitrate[rate->mcs] * rate->n_bonded_ch; } static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) { static const u32 base[4][12] = { { 6500000, 13000000, 19500000, 26000000, 39000000, 52000000, 58500000, 65000000, 78000000, /* not in the spec, but some devices use this: */ 86700000, 97500000, 108300000, }, { 13500000, 27000000, 40500000, 54000000, 81000000, 108000000, 121500000, 135000000, 162000000, 180000000, 202500000, 225000000, }, { 29300000, 58500000, 87800000, 117000000, 175500000, 234000000, 263300000, 292500000, 351000000, 390000000, 438800000, 487500000, }, { 58500000, 117000000, 175500000, 234000000, 351000000, 468000000, 526500000, 585000000, 702000000, 780000000, 877500000, 975000000, }, }; u32 bitrate; int idx; if (rate->mcs > 11) goto warn; switch (rate->bw) { case RATE_INFO_BW_160: idx = 3; break; case RATE_INFO_BW_80: idx = 2; break; case RATE_INFO_BW_40: idx = 1; break; case RATE_INFO_BW_5: case RATE_INFO_BW_10: default: goto warn; case RATE_INFO_BW_20: idx = 0; } bitrate = base[idx][rate->mcs]; bitrate *= rate->nss; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; warn: WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n", rate->bw, rate->mcs, rate->nss); return 0; } static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) { #define SCALE 6144 u32 mcs_divisors[14] = { 102399, /* 16.666666... */ 51201, /* 8.333333... */ 34134, /* 5.555555... 
*/ 25599, /* 4.166666... */ 17067, /* 2.777777... */ 12801, /* 2.083333... */ 11377, /* 1.851725... */ 10239, /* 1.666666... */ 8532, /* 1.388888... */ 7680, /* 1.250000... */ 6828, /* 1.111111... */ 6144, /* 1.000000... */ 5690, /* 0.926106... */ 5120, /* 0.833333... */ }; u32 rates_160M[3] = { 960777777, 907400000, 816666666 }; u32 rates_996[3] = { 480388888, 453700000, 408333333 }; u32 rates_484[3] = { 229411111, 216666666, 195000000 }; u32 rates_242[3] = { 114711111, 108333333, 97500000 }; u32 rates_106[3] = { 40000000, 37777777, 34000000 }; u32 rates_52[3] = { 18820000, 17777777, 16000000 }; u32 rates_26[3] = { 9411111, 8888888, 8000000 }; u64 tmp; u32 result; if (WARN_ON_ONCE(rate->mcs > 13)) return 0; if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2)) return 0; if (WARN_ON_ONCE(rate->he_ru_alloc > NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) return 0; if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) return 0; if (rate->bw == RATE_INFO_BW_160 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) result = rates_160M[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_80 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996)) result = rates_996[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_40 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484)) result = rates_484[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_20 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_242)) result = rates_242[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_106) result = rates_106[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_52) result = rates_52[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) result = rates_26[rate->he_gi]; else { WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", rate->bw, rate->he_ru_alloc); return 0; } /* now scale to the appropriate MCS */ tmp = result; tmp *= SCALE; do_div(tmp, mcs_divisors[rate->mcs]); result = tmp; /* and take NSS, DCM into account */ result = (result * rate->nss) / 8; if (rate->he_dcm) result /= 2; return result / 10000; } static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate) { #define SCALE 6144 static const u32 mcs_divisors[16] = { 102399, /* 16.666666... */ 51201, /* 8.333333... */ 34134, /* 5.555555... */ 25599, /* 4.166666... */ 17067, /* 2.777777... */ 12801, /* 2.083333... */ 11377, /* 1.851725... */ 10239, /* 1.666666... */ 8532, /* 1.388888... */ 7680, /* 1.250000... */ 6828, /* 1.111111... */ 6144, /* 1.000000... */ 5690, /* 0.926106... */ 5120, /* 0.833333... */ 409600, /* 66.666666... */ 204800, /* 33.333333... 
*/ }; static const u32 rates_996[3] = { 480388888, 453700000, 408333333 }; static const u32 rates_484[3] = { 229411111, 216666666, 195000000 }; static const u32 rates_242[3] = { 114711111, 108333333, 97500000 }; static const u32 rates_106[3] = { 40000000, 37777777, 34000000 }; static const u32 rates_52[3] = { 18820000, 17777777, 16000000 }; static const u32 rates_26[3] = { 9411111, 8888888, 8000000 }; u64 tmp; u32 result; if (WARN_ON_ONCE(rate->mcs > 15)) return 0; if (WARN_ON_ONCE(rate->eht_gi > NL80211_RATE_INFO_EHT_GI_3_2)) return 0; if (WARN_ON_ONCE(rate->eht_ru_alloc > NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) return 0; if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) return 0; /* Bandwidth checks for MCS 14 */ if (rate->mcs == 14) { if ((rate->bw != RATE_INFO_BW_EHT_RU && rate->bw != RATE_INFO_BW_80 && rate->bw != RATE_INFO_BW_160 && rate->bw != RATE_INFO_BW_320) || (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_996 && rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 && rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) { WARN(1, "invalid EHT BW for MCS 14: bw:%d, ru:%d\n", rate->bw, rate->eht_ru_alloc); return 0; } } if (rate->bw == RATE_INFO_BW_320 || (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) result = 4 * rates_996[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484) result = 3 * rates_996[rate->eht_gi] + rates_484[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_3x996) result = 3 * rates_996[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484) result = 2 * rates_996[rate->eht_gi] + rates_484[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_160 || (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_2x996)) result = 2 * rates_996[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242) result = rates_996[rate->eht_gi] + rates_484[rate->eht_gi] + rates_242[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996P484) result = rates_996[rate->eht_gi] + rates_484[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_80 || (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996)) result = rates_996[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_484P242) result = rates_484[rate->eht_gi] + rates_242[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_40 || (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_484)) result = rates_484[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_20 || (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_242)) result = rates_242[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_106P26) result = rates_106[rate->eht_gi] + rates_26[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_106) result = rates_106[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_52P26) result = rates_52[rate->eht_gi] + rates_26[rate->eht_gi]; else if (rate->bw == 
RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_52) result = rates_52[rate->eht_gi]; else if (rate->bw == RATE_INFO_BW_EHT_RU && rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_26) result = rates_26[rate->eht_gi]; else { WARN(1, "invalid EHT MCS: bw:%d, ru:%d\n", rate->bw, rate->eht_ru_alloc); return 0; } /* now scale to the appropriate MCS */ tmp = result; tmp *= SCALE; do_div(tmp, mcs_divisors[rate->mcs]); /* and take NSS */ tmp *= rate->nss; do_div(tmp, 8); result = tmp; return result / 10000; } static u32 cfg80211_calculate_bitrate_s1g(struct rate_info *rate) { /* For 1, 2, 4, 8 and 16 MHz channels */ static const u32 base[5][11] = { { 300000, 600000, 900000, 1200000, 1800000, 2400000, 2700000, 3000000, 3600000, 4000000, /* MCS 10 supported in 1 MHz only */ 150000, }, { 650000, 1300000, 1950000, 2600000, 3900000, 5200000, 5850000, 6500000, 7800000, /* MCS 9 not valid */ }, { 1350000, 2700000, 4050000, 5400000, 8100000, 10800000, 12150000, 13500000, 16200000, 18000000, }, { 2925000, 5850000, 8775000, 11700000, 17550000, 23400000, 26325000, 29250000, 35100000, 39000000, }, { 5850000, 11700000, 17550000, 23400000, 35100000, 46800000, 52650000, 58500000, 70200000, 78000000, }, }; u32 bitrate; /* default is 1 MHz index */ int idx = 0; if (rate->mcs >= 11) goto warn; switch (rate->bw) { case RATE_INFO_BW_16: idx = 4; break; case RATE_INFO_BW_8: idx = 3; break; case RATE_INFO_BW_4: idx = 2; break; case RATE_INFO_BW_2: idx = 1; break; case RATE_INFO_BW_1: idx = 0; break; case RATE_INFO_BW_5: case RATE_INFO_BW_10: case RATE_INFO_BW_20: case RATE_INFO_BW_40: case RATE_INFO_BW_80: case RATE_INFO_BW_160: default: goto warn; } bitrate = base[idx][rate->mcs]; bitrate *= rate->nss; if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) bitrate = (bitrate / 9) * 10; /* do NOT round down here */ return (bitrate + 50000) / 100000; warn: WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n", rate->bw, rate->mcs, rate->nss); return 0; } u32 cfg80211_calculate_bitrate(struct rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) return cfg80211_calculate_bitrate_ht(rate); if (rate->flags & RATE_INFO_FLAGS_DMG) return cfg80211_calculate_bitrate_dmg(rate); if (rate->flags & RATE_INFO_FLAGS_EXTENDED_SC_DMG) return cfg80211_calculate_bitrate_extended_sc_dmg(rate); if (rate->flags & RATE_INFO_FLAGS_EDMG) return cfg80211_calculate_bitrate_edmg(rate); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); if (rate->flags & RATE_INFO_FLAGS_HE_MCS) return cfg80211_calculate_bitrate_he(rate); if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) return cfg80211_calculate_bitrate_eht(rate); if (rate->flags & RATE_INFO_FLAGS_S1G_MCS) return cfg80211_calculate_bitrate_s1g(rate); return rate->legacy; } EXPORT_SYMBOL(cfg80211_calculate_bitrate); int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, enum ieee80211_p2p_attr_id attr, u8 *buf, unsigned int bufsize) { u8 *out = buf; u16 attr_remaining = 0; bool desired_attr = false; u16 desired_len = 0; while (len > 0) { unsigned int iedatalen; unsigned int copy; const u8 *iedata; if (len < 2) return -EILSEQ; iedatalen = ies[1]; if (iedatalen + 2 > len) return -EILSEQ; if (ies[0] != WLAN_EID_VENDOR_SPECIFIC) goto cont; if (iedatalen < 4) goto cont; iedata = ies + 2; /* check WFA OUI, P2P subtype */ if (iedata[0] != 0x50 || iedata[1] != 0x6f || iedata[2] != 0x9a || iedata[3] != 0x09) goto cont; iedatalen -= 4; iedata += 4; /* check attribute continuation into this IE */ copy = min_t(unsigned int, attr_remaining, iedatalen); if
(copy && desired_attr) { desired_len += copy; if (out) { memcpy(out, iedata, min(bufsize, copy)); out += min(bufsize, copy); bufsize -= min(bufsize, copy); } if (copy == attr_remaining) return desired_len; } attr_remaining -= copy; if (attr_remaining) goto cont; iedatalen -= copy; iedata += copy; while (iedatalen > 0) { u16 attr_len; /* P2P attribute ID & size must fit */ if (iedatalen < 3) return -EILSEQ; desired_attr = iedata[0] == attr; attr_len = get_unaligned_le16(iedata + 1); iedatalen -= 3; iedata += 3; copy = min_t(unsigned int, attr_len, iedatalen); if (desired_attr) { desired_len += copy; if (out) { memcpy(out, iedata, min(bufsize, copy)); out += min(bufsize, copy); bufsize -= min(bufsize, copy); } if (copy == attr_len) return desired_len; } iedata += copy; iedatalen -= copy; attr_remaining = attr_len - copy; } cont: len -= ies[1] + 2; ies += ies[1] + 2; } if (attr_remaining && desired_attr) return -EILSEQ; return -ENOENT; } EXPORT_SYMBOL(cfg80211_get_p2p_attr); static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id, bool id_ext) { int i; /* Make sure array values are legal */ if (WARN_ON(ids[n_ids - 1] == WLAN_EID_EXTENSION)) return false; i = 0; while (i < n_ids) { if (ids[i] == WLAN_EID_EXTENSION) { if (id_ext && (ids[i + 1] == id)) return true; i += 2; continue; } if (ids[i] == id && !id_ext) return true; i++; } return false; } static size_t skip_ie(const u8 *ies, size_t ielen, size_t pos) { /* we assume a validly formed IEs buffer */ u8 len = ies[pos + 1]; pos += 2 + len; /* the IE itself must have 255 bytes for fragments to follow */ if (len < 255) return pos; while (pos < ielen && ies[pos] == WLAN_EID_FRAGMENT) { len = ies[pos + 1]; pos += 2 + len; } return pos; } size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, const u8 *after_ric, int n_after_ric, size_t offset) { size_t pos = offset; while (pos < ielen) { u8 ext = 0; if (ies[pos] == WLAN_EID_EXTENSION) ext = 2; if ((pos + ext) >= ielen) break; if (!ieee80211_id_in_list(ids, n_ids, ies[pos + ext], ies[pos] == WLAN_EID_EXTENSION)) break; if (ies[pos] == WLAN_EID_RIC_DATA && n_after_ric) { pos = skip_ie(ies, ielen, pos); while (pos < ielen) { if (ies[pos] == WLAN_EID_EXTENSION) ext = 2; else ext = 0; if ((pos + ext) >= ielen) break; if (!ieee80211_id_in_list(after_ric, n_after_ric, ies[pos + ext], ext == 2)) pos = skip_ie(ies, ielen, pos); else break; } } else { pos = skip_ie(ies, ielen, pos); } } return pos; } EXPORT_SYMBOL(ieee80211_ie_split_ric); void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id) { unsigned int elem_len; if (!len_pos) return; elem_len = skb->data + skb->len - len_pos - 1; while (elem_len > 255) { /* this one is 255 */ *len_pos = 255; /* remaining data gets smaller */ elem_len -= 255; /* make space for the fragment ID/len in SKB */ skb_put(skb, 2); /* shift back the remaining data to place fragment ID/len */ memmove(len_pos + 255 + 3, len_pos + 255 + 1, elem_len); /* place the fragment ID */ len_pos += 255 + 1; *len_pos = frag_id; /* and point to fragment length to update later */ len_pos++; } *len_pos = elem_len; } EXPORT_SYMBOL(ieee80211_fragment_element); bool ieee80211_operating_class_to_band(u8 operating_class, enum nl80211_band *band) { switch (operating_class) { case 112: case 115 ... 127: case 128 ... 130: *band = NL80211_BAND_5GHZ; return true; case 131 ... 
135: case 137: *band = NL80211_BAND_6GHZ; return true; case 81: case 82: case 83: case 84: *band = NL80211_BAND_2GHZ; return true; case 180: *band = NL80211_BAND_60GHZ; return true; } return false; } EXPORT_SYMBOL(ieee80211_operating_class_to_band); bool ieee80211_operating_class_to_chandef(u8 operating_class, struct ieee80211_channel *chan, struct cfg80211_chan_def *chandef) { u32 control_freq, offset = 0; enum nl80211_band band; if (!ieee80211_operating_class_to_band(operating_class, &band) || !chan || band != chan->band) return false; control_freq = chan->center_freq; chandef->chan = chan; if (control_freq >= 5955) offset = control_freq - 5955; else if (control_freq >= 5745) offset = control_freq - 5745; else if (control_freq >= 5180) offset = control_freq - 5180; offset /= 20; switch (operating_class) { case 81: /* 2 GHz band; 20 MHz; channels 1..13 */ case 82: /* 2 GHz band; 20 MHz; channel 14 */ case 115: /* 5 GHz band; 20 MHz; channels 36,40,44,48 */ case 118: /* 5 GHz band; 20 MHz; channels 52,56,60,64 */ case 121: /* 5 GHz band; 20 MHz; channels 100..144 */ case 124: /* 5 GHz band; 20 MHz; channels 149,153,157,161 */ case 125: /* 5 GHz band; 20 MHz; channels 149..177 */ case 131: /* 6 GHz band; 20 MHz; channels 1..233*/ case 136: /* 6 GHz band; 20 MHz; channel 2 */ chandef->center_freq1 = control_freq; chandef->width = NL80211_CHAN_WIDTH_20; return true; case 83: /* 2 GHz band; 40 MHz; channels 1..9 */ case 116: /* 5 GHz band; 40 MHz; channels 36,44 */ case 119: /* 5 GHz band; 40 MHz; channels 52,60 */ case 122: /* 5 GHz band; 40 MHz; channels 100,108,116,124,132,140 */ case 126: /* 5 GHz band; 40 MHz; channels 149,157,165,173 */ chandef->center_freq1 = control_freq + 10; chandef->width = NL80211_CHAN_WIDTH_40; return true; case 84: /* 2 GHz band; 40 MHz; channels 5..13 */ case 117: /* 5 GHz band; 40 MHz; channels 40,48 */ case 120: /* 5 GHz band; 40 MHz; channels 56,64 */ case 123: /* 5 GHz band; 40 MHz; channels 104,112,120,128,136,144 */ case 127: /* 5 GHz band; 40 MHz; channels 153,161,169,177 */ chandef->center_freq1 = control_freq - 10; chandef->width = NL80211_CHAN_WIDTH_40; return true; case 132: /* 6 GHz band; 40 MHz; channels 1,5,..,229*/ chandef->center_freq1 = control_freq + 10 - (offset & 1) * 20; chandef->width = NL80211_CHAN_WIDTH_40; return true; case 128: /* 5 GHz band; 80 MHz; channels 36..64,100..144,149..177 */ case 133: /* 6 GHz band; 80 MHz; channels 1,5,..,229 */ chandef->center_freq1 = control_freq + 30 - (offset & 3) * 20; chandef->width = NL80211_CHAN_WIDTH_80; return true; case 129: /* 5 GHz band; 160 MHz; channels 36..64,100..144,149..177 */ case 134: /* 6 GHz band; 160 MHz; channels 1,5,..,229 */ chandef->center_freq1 = control_freq + 70 - (offset & 7) * 20; chandef->width = NL80211_CHAN_WIDTH_160; return true; case 130: /* 5 GHz band; 80+80 MHz; channels 36..64,100..144,149..177 */ case 135: /* 6 GHz band; 80+80 MHz; channels 1,5,..,229 */ /* The center_freq2 of 80+80 MHz is unknown */ case 137: /* 6 GHz band; 320 MHz; channels 1,5,..,229 */ /* 320-1 or 320-2 channelization is unknown */ default: return false; } } EXPORT_SYMBOL(ieee80211_operating_class_to_chandef); bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class) { u8 vht_opclass; u32 freq = chandef->center_freq1; if (freq >= 2412 && freq <= 2472) { if (chandef->width > NL80211_CHAN_WIDTH_40) return false; /* 2.407 GHz, channels 1..13 */ if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 83; /* HT40+ */ else 
*op_class = 84; /* HT40- */ } else { *op_class = 81; } return true; } if (freq == 2484) { /* channel 14 is only for IEEE 802.11b */ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT) return false; *op_class = 82; /* channel 14 */ return true; } switch (chandef->width) { case NL80211_CHAN_WIDTH_80: vht_opclass = 128; break; case NL80211_CHAN_WIDTH_160: vht_opclass = 129; break; case NL80211_CHAN_WIDTH_80P80: vht_opclass = 130; break; case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_5: return false; /* unsupported for now */ default: vht_opclass = 0; break; } /* 5 GHz, channels 36..48 */ if (freq >= 5180 && freq <= 5240) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 116; else *op_class = 117; } else { *op_class = 115; } return true; } /* 5 GHz, channels 52..64 */ if (freq >= 5260 && freq <= 5320) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 119; else *op_class = 120; } else { *op_class = 118; } return true; } /* 5 GHz, channels 100..144 */ if (freq >= 5500 && freq <= 5720) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 122; else *op_class = 123; } else { *op_class = 121; } return true; } /* 5 GHz, channels 149..169 */ if (freq >= 5745 && freq <= 5845) { if (vht_opclass) { *op_class = vht_opclass; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { if (freq > chandef->chan->center_freq) *op_class = 126; else *op_class = 127; } else if (freq <= 5805) { *op_class = 124; } else { *op_class = 125; } return true; } /* 56.16 GHz, channel 1..4 */ if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 6) { if (chandef->width >= NL80211_CHAN_WIDTH_40) return false; *op_class = 180; return true; } /* not supported yet */ return false; } EXPORT_SYMBOL(ieee80211_chandef_to_operating_class); static int cfg80211_wdev_bi(struct wireless_dev *wdev) { switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: WARN_ON(wdev->valid_links); return wdev->links[0].ap.beacon_interval; case NL80211_IFTYPE_MESH_POINT: return wdev->u.mesh.beacon_interval; case NL80211_IFTYPE_ADHOC: return wdev->u.ibss.beacon_interval; default: break; } return 0; } static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int, u32 *beacon_int_gcd, bool *beacon_int_different, int radio_idx) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; *beacon_int_gcd = 0; *beacon_int_different = false; rdev = wiphy_to_rdev(wiphy); list_for_each_entry(wdev, &wiphy->wdev_list, list) { int wdev_bi; /* this feature isn't supported with MLO */ if (wdev->valid_links) continue; /* skip wdevs not active on the given wiphy radio */ if (radio_idx >= 0 && !(rdev_get_radio_mask(rdev, wdev->netdev) & BIT(radio_idx))) continue; wdev_bi = cfg80211_wdev_bi(wdev); if (!wdev_bi) continue; if (!*beacon_int_gcd) { *beacon_int_gcd = wdev_bi; continue; } if (wdev_bi == *beacon_int_gcd) continue; *beacon_int_different = true; *beacon_int_gcd = gcd(*beacon_int_gcd, wdev_bi); } if (new_beacon_int && *beacon_int_gcd != new_beacon_int) { if (*beacon_int_gcd) *beacon_int_different = true; *beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int); } } int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, u32 beacon_int) { /* * This is just a basic pre-condition check; 
if interface combinations * are possible the driver must already be checking those with a call * to cfg80211_check_combinations(), in which case we'll validate more * through the cfg80211_calculate_bi_data() call and code in * cfg80211_iter_combinations(). */ if (beacon_int < 10 || beacon_int > 10000) return -EINVAL; return 0; } int cfg80211_iter_combinations(struct wiphy *wiphy, struct iface_combination_params *params, void (*iter)(const struct ieee80211_iface_combination *c, void *data), void *data) { const struct wiphy_radio *radio = NULL; const struct ieee80211_iface_combination *c, *cs; const struct ieee80211_regdomain *regdom; enum nl80211_dfs_regions region = 0; int i, j, n, iftype; int num_interfaces = 0; u32 used_iftypes = 0; u32 beacon_int_gcd; bool beacon_int_different; if (params->radio_idx >= 0) radio = &wiphy->radio[params->radio_idx]; /* * This is a bit strange, since the iteration used to rely only on * the data given by the driver, but here it now relies on context, * in form of the currently operating interfaces. * This is OK for all current users, and saves us from having to * push the GCD calculations into all the drivers. * In the future, this should probably rely more on data that's in * cfg80211 already - the only thing not would appear to be any new * interfaces (while being brought up) and channel/radar data. */ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int, &beacon_int_gcd, &beacon_int_different, params->radio_idx); if (params->radar_detect) { rcu_read_lock(); regdom = rcu_dereference(cfg80211_regdomain); if (regdom) region = regdom->dfs_region; rcu_read_unlock(); } for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { num_interfaces += params->iftype_num[iftype]; if (params->iftype_num[iftype] > 0 && !cfg80211_iftype_allowed(wiphy, iftype, 0, 1)) used_iftypes |= BIT(iftype); } if (radio) { cs = radio->iface_combinations; n = radio->n_iface_combinations; } else { cs = wiphy->iface_combinations; n = wiphy->n_iface_combinations; } for (i = 0; i < n; i++) { struct ieee80211_iface_limit *limits; u32 all_iftypes = 0; c = &cs[i]; if (num_interfaces > c->max_interfaces) continue; if (params->num_different_channels > c->num_different_channels) continue; limits = kmemdup_array(c->limits, c->n_limits, sizeof(*limits), GFP_KERNEL); if (!limits) return -ENOMEM; for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1)) continue; for (j = 0; j < c->n_limits; j++) { all_iftypes |= limits[j].types; if (!(limits[j].types & BIT(iftype))) continue; if (limits[j].max < params->iftype_num[iftype]) goto cont; limits[j].max -= params->iftype_num[iftype]; } } if (params->radar_detect != (c->radar_detect_widths & params->radar_detect)) goto cont; if (params->radar_detect && c->radar_detect_regions && !(c->radar_detect_regions & BIT(region))) goto cont; /* Finally check that all iftypes that we're currently * using are actually part of this combination. If they * aren't then we can't use this combination and have * to continue to the next. */ if ((all_iftypes & used_iftypes) != used_iftypes) goto cont; if (beacon_int_gcd) { if (c->beacon_int_min_gcd && beacon_int_gcd < c->beacon_int_min_gcd) goto cont; if (!c->beacon_int_min_gcd && beacon_int_different) goto cont; } /* This combination covered all interface types and * supported the requested numbers, so we're good. 
*/ (*iter)(c, data); cont: kfree(limits); } return 0; } EXPORT_SYMBOL(cfg80211_iter_combinations); static void cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c, void *data) { int *num = data; (*num)++; } int cfg80211_check_combinations(struct wiphy *wiphy, struct iface_combination_params *params) { int err, num = 0; err = cfg80211_iter_combinations(wiphy, params, cfg80211_iter_sum_ifcombs, &num); if (err) return err; if (num == 0) return -EBUSY; return 0; } EXPORT_SYMBOL(cfg80211_check_combinations); int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, const u8 *rates, unsigned int n_rates, u32 *mask) { int i, j; if (!sband) return -EINVAL; if (n_rates == 0 || n_rates > NL80211_MAX_SUPP_RATES) return -EINVAL; *mask = 0; for (i = 0; i < n_rates; i++) { int rate = (rates[i] & 0x7f) * 5; bool found = false; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].bitrate == rate) { found = true; *mask |= BIT(j); break; } } if (!found) return -EINVAL; } /* * mask must have at least one bit set here since we * didn't accept a 0-length rates array nor allowed * entries in the array that didn't exist */ return 0; } unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy) { enum nl80211_band band; unsigned int n_channels = 0; for (band = 0; band < NUM_NL80211_BANDS; band++) if (wiphy->bands[band]) n_channels += wiphy->bands[band]->n_channels; return n_channels; } EXPORT_SYMBOL(ieee80211_get_num_supported_channels); int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; wdev = dev->ieee80211_ptr; if (!wdev) return -EOPNOTSUPP; rdev = wiphy_to_rdev(wdev->wiphy); if (!rdev->ops->get_station) return -EOPNOTSUPP; memset(sinfo, 0, sizeof(*sinfo)); guard(wiphy)(&rdev->wiphy); return rdev_get_station(rdev, dev, mac_addr, sinfo); } EXPORT_SYMBOL(cfg80211_get_station); void cfg80211_free_nan_func(struct cfg80211_nan_func *f) { int i; if (!f) return; kfree(f->serv_spec_info); kfree(f->srf_bf); kfree(f->srf_macs); for (i = 0; i < f->num_rx_filters; i++) kfree(f->rx_filters[i].filter); for (i = 0; i < f->num_tx_filters; i++) kfree(f->tx_filters[i].filter); kfree(f->rx_filters); kfree(f->tx_filters); kfree(f); } EXPORT_SYMBOL(cfg80211_free_nan_func); bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, u32 center_freq_khz, u32 bw_khz) { u32 start_freq_khz, end_freq_khz; start_freq_khz = center_freq_khz - (bw_khz / 2); end_freq_khz = center_freq_khz + (bw_khz / 2); if (start_freq_khz >= freq_range->start_freq_khz && end_freq_khz <= freq_range->end_freq_khz) return true; return false; } int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp) { sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1, sizeof(*(sinfo->pertid)), gfp); if (!sinfo->pertid) return -ENOMEM; return 0; } EXPORT_SYMBOL(cfg80211_sinfo_alloc_tid_stats); /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ const unsigned char rfc1042_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; EXPORT_SYMBOL(rfc1042_header); /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ const unsigned char bridge_tunnel_header[] __aligned(2) = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; EXPORT_SYMBOL(bridge_tunnel_header); /* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ struct iapp_layer2_update { u8 da[ETH_ALEN]; /* broadcast */ u8 sa[ETH_ALEN]; /* STA 
addr */ __be16 len; /* 6 */ u8 dsap; /* 0 */ u8 ssap; /* 0 */ u8 control; u8 xid_info[3]; } __packed; void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr) { struct iapp_layer2_update *msg; struct sk_buff *skb; /* Send Level 2 Update Frame to update forwarding tables in layer 2 * bridge devices */ skb = dev_alloc_skb(sizeof(*msg)); if (!skb) return; msg = skb_put(skb, sizeof(*msg)); /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ eth_broadcast_addr(msg->da); ether_addr_copy(msg->sa, addr); msg->len = htons(6); msg->dsap = 0; msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ msg->control = 0xaf; /* XID response lsb.1111F101. * F=0 (no poll command; unsolicited frame) */ msg->xid_info[0] = 0x81; /* XID format identifier */ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } EXPORT_SYMBOL(cfg80211_send_layer2_update); int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, enum ieee80211_vht_chanwidth bw, int mcs, bool ext_nss_bw_capable, unsigned int max_vht_nss) { u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map); int ext_nss_bw; int supp_width; int i, mcs_encoding; if (map == 0xffff) return 0; if (WARN_ON(mcs > 9 || max_vht_nss > 8)) return 0; if (mcs <= 7) mcs_encoding = 0; else if (mcs == 8) mcs_encoding = 1; else mcs_encoding = 2; if (!max_vht_nss) { /* find max_vht_nss for the given MCS */ for (i = 7; i >= 0; i--) { int supp = (map >> (2 * i)) & 3; if (supp == 3) continue; if (supp >= mcs_encoding) { max_vht_nss = i + 1; break; } } } if (!(cap->supp_mcs.tx_mcs_map & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) return max_vht_nss; ext_nss_bw = le32_get_bits(cap->vht_cap_info, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); supp_width = le32_get_bits(cap->vht_cap_info, IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK); /* if not capable, treat ext_nss_bw as 0 */ if (!ext_nss_bw_capable) ext_nss_bw = 0; /* This is invalid */ if (supp_width == 3) return 0; /* This is an invalid combination so pretend nothing is supported */ if (supp_width == 2 && (ext_nss_bw == 1 || ext_nss_bw == 2)) return 0; /* * Cover all the special cases according to IEEE 802.11-2016 * Table 9-250. All other cases are either factor of 1 or not * valid/supported. 
*/ switch (bw) { case IEEE80211_VHT_CHANWIDTH_USE_HT: case IEEE80211_VHT_CHANWIDTH_80MHZ: if ((supp_width == 1 || supp_width == 2) && ext_nss_bw == 3) return 2 * max_vht_nss; break; case IEEE80211_VHT_CHANWIDTH_160MHZ: if (supp_width == 0 && (ext_nss_bw == 1 || ext_nss_bw == 2)) return max_vht_nss / 2; if (supp_width == 0 && ext_nss_bw == 3) return (3 * max_vht_nss) / 4; if (supp_width == 1 && ext_nss_bw == 3) return 2 * max_vht_nss; break; case IEEE80211_VHT_CHANWIDTH_80P80MHZ: if (supp_width == 0 && ext_nss_bw == 1) return 0; /* not possible */ if (supp_width == 0 && ext_nss_bw == 2) return max_vht_nss / 2; if (supp_width == 0 && ext_nss_bw == 3) return (3 * max_vht_nss) / 4; if (supp_width == 1 && ext_nss_bw == 0) return 0; /* not possible */ if (supp_width == 1 && ext_nss_bw == 1) return max_vht_nss / 2; if (supp_width == 1 && ext_nss_bw == 2) return (3 * max_vht_nss) / 4; break; } /* not covered or invalid combination received */ return max_vht_nss; } EXPORT_SYMBOL(ieee80211_get_vht_max_nss); bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, bool is_4addr, u8 check_swif) { bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN; switch (check_swif) { case 0: if (is_vlan && is_4addr) return wiphy->flags & WIPHY_FLAG_4ADDR_AP; return wiphy->interface_modes & BIT(iftype); case 1: if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan) return wiphy->flags & WIPHY_FLAG_4ADDR_AP; return wiphy->software_iftypes & BIT(iftype); default: break; } return false; } EXPORT_SYMBOL(cfg80211_iftype_allowed); void cfg80211_remove_link(struct wireless_dev *wdev, unsigned int link_id) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); lockdep_assert_wiphy(wdev->wiphy); switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, wdev->netdev, link_id, true); break; default: /* per-link not relevant */ break; } rdev_del_intf_link(rdev, wdev, link_id); wdev->valid_links &= ~BIT(link_id); eth_zero_addr(wdev->links[link_id].addr); } void cfg80211_remove_links(struct wireless_dev *wdev) { unsigned int link_id; /* * links are controlled by upper layers (userspace/cfg) * only for AP mode, so only remove them here for AP */ if (wdev->iftype != NL80211_IFTYPE_AP) return; if (wdev->valid_links) { for_each_valid_link(wdev, link_id) cfg80211_remove_link(wdev, link_id); } } int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { cfg80211_remove_links(wdev); return rdev_del_virtual_intf(rdev, wdev); } const struct wiphy_iftype_ext_capab * cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type) { int i; for (i = 0; i < wiphy->num_iftype_ext_capab; i++) { if (wiphy->iftype_ext_capab[i].iftype == type) return &wiphy->iftype_ext_capab[i]; } return NULL; } EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa); static bool ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, u32 freq, u32 width) { const struct wiphy_radio_freq_range *r; int i; for (i = 0; i < radio->n_freq_range; i++) { r = &radio->freq_range[i]; if (freq - width / 2 >= r->start_freq && freq + width / 2 <= r->end_freq) return true; } return false; } bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, const struct cfg80211_chan_def *chandef) { u32 freq, width; freq = ieee80211_chandef_to_khz(chandef); width = cfg80211_chandef_get_width(chandef); if (!ieee80211_radio_freq_range_valid(radio, freq, width)) return false; freq = MHZ_TO_KHZ(chandef->center_freq2); if (freq && 
!ieee80211_radio_freq_range_valid(radio, freq, width)) return false; return true; } EXPORT_SYMBOL(cfg80211_radio_chandef_valid); bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev, struct ieee80211_channel *chan) { struct wiphy *wiphy = wdev->wiphy; const struct wiphy_radio *radio; struct cfg80211_chan_def chandef; u32 radio_mask; int i; radio_mask = wdev->radio_mask; if (!wiphy->n_radio || radio_mask == BIT(wiphy->n_radio) - 1) return true; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); for (i = 0; i < wiphy->n_radio; i++) { if (!(radio_mask & BIT(i))) continue; radio = &wiphy->radio[i]; if (!cfg80211_radio_chandef_valid(radio, &chandef)) continue; return true; } return false; } EXPORT_SYMBOL(cfg80211_wdev_channel_allowed);
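/*
 * Illustration only (not part of net/wireless/util.c): the HT path in
 * cfg80211_calculate_bitrate_ht() earlier in this file reduces to
 * base_rate(bw) * modulation_factor * streams, with a 10/9 bump for the
 * short guard interval, reported in units of 100 kbit/s. A minimal
 * standalone sketch of the same arithmetic, assuming the HT MCS layout
 * used above (modulation = mcs & 7, streams = (mcs >> 3) + 1); the
 * function name is made up for this example.
 */
static unsigned int __maybe_unused example_ht_rate_100kbps(unsigned int mcs,
							    int bw40, int sgi)
{
	unsigned int modulation = mcs & 7;	/* MCS modulo 8 selects modulation/coding */
	unsigned int streams = (mcs >> 3) + 1;	/* each block of 8 MCS values adds a stream */
	unsigned int bitrate = bw40 ? 13500000 : 6500000;	/* MCS 0 base rate in bit/s */

	if (modulation < 4)
		bitrate *= modulation + 1;
	else if (modulation == 4)
		bitrate *= modulation + 2;
	else
		bitrate *= modulation + 3;

	bitrate *= streams;

	if (sgi)
		bitrate = (bitrate / 9) * 10;	/* 400 ns GI: 10/9 of the long-GI rate */

	/* e.g. mcs=7, bw40=1, sgi=1 -> 1500, i.e. 150.0 Mbit/s */
	return (bitrate + 50000) / 100000;	/* round to 100 kbit/s units */
}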
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/core/netprio_cgroup.c Priority Control Group * * Authors: Neil Horman <nhorman@tuxdriver.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/cgroup.h> #include <linux/rcupdate.h> #include <linux/atomic.h> #include <linux/sched/task.h> #include <net/rtnetlink.h> #include <net/pkt_cls.h> #include <net/sock.h> #include <net/netprio_cgroup.h> #include <linux/fdtable.h> /* * netprio allocates per-net_device priomap array which is indexed by * css->id. Limiting css ID to 16bits doesn't lose anything. */ #define NETPRIO_ID_MAX USHRT_MAX #define PRIOMAP_MIN_SZ 128 /* * Extend @dev->priomap so that it's large enough to accommodate * @target_idx. @dev->priomap.priomap_len > @target_idx after successful * return. Must be called under rtnl lock. */ static int extend_netdev_table(struct net_device *dev, u32 target_idx) { struct netprio_map *old, *new; size_t new_sz, new_len; /* is the existing priomap large enough? */ old = rtnl_dereference(dev->priomap); if (old && old->priomap_len > target_idx) return 0; /* * Determine the new size. Let's keep it power-of-two. We start * from PRIOMAP_MIN_SZ and double it until it's large enough to * accommodate @target_idx. */ new_sz = PRIOMAP_MIN_SZ; while (true) { new_len = (new_sz - offsetof(struct netprio_map, priomap)) / sizeof(new->priomap[0]); if (new_len > target_idx) break; new_sz *= 2; /* overflowed? */ if (WARN_ON(new_sz < PRIOMAP_MIN_SZ)) return -ENOSPC; } /* allocate & copy */ new = kzalloc(new_sz, GFP_KERNEL); if (!new) return -ENOMEM; if (old) memcpy(new->priomap, old->priomap, old->priomap_len * sizeof(old->priomap[0])); new->priomap_len = new_len; /* install the new priomap */ rcu_assign_pointer(dev->priomap, new); if (old) kfree_rcu(old, rcu); return 0; } /** * netprio_prio - return the effective netprio of a cgroup-net_device pair * @css: css part of the target pair * @dev: net_device part of the target pair * * Should be called under RCU read or rtnl lock.
*/ static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev) { struct netprio_map *map = rcu_dereference_rtnl(dev->priomap); int id = css->id; if (map && id < map->priomap_len) return map->priomap[id]; return 0; } /** * netprio_set_prio - set netprio on a cgroup-net_device pair * @css: css part of the target pair * @dev: net_device part of the target pair * @prio: prio to set * * Set netprio to @prio on @css-@dev pair. Should be called under rtnl * lock and may fail under memory pressure for non-zero @prio. */ static int netprio_set_prio(struct cgroup_subsys_state *css, struct net_device *dev, u32 prio) { struct netprio_map *map; int id = css->id; int ret; /* avoid extending priomap for zero writes */ map = rtnl_dereference(dev->priomap); if (!prio && (!map || map->priomap_len <= id)) return 0; ret = extend_netdev_table(dev, id); if (ret) return ret; map = rtnl_dereference(dev->priomap); map->priomap[id] = prio; return 0; } static struct cgroup_subsys_state * cgrp_css_alloc(struct cgroup_subsys_state *parent_css) { struct cgroup_subsys_state *css; css = kzalloc(sizeof(*css), GFP_KERNEL); if (!css) return ERR_PTR(-ENOMEM); return css; } static int cgrp_css_online(struct cgroup_subsys_state *css) { struct cgroup_subsys_state *parent_css = css->parent; struct net_device *dev; int ret = 0; if (css->id > NETPRIO_ID_MAX) return -ENOSPC; if (!parent_css) return 0; rtnl_lock(); /* * Inherit prios from the parent. As all prios are set during * onlining, there is no need to clear them on offline. */ for_each_netdev(&init_net, dev) { u32 prio = netprio_prio(parent_css, dev); ret = netprio_set_prio(css, dev, prio); if (ret) break; } rtnl_unlock(); return ret; } static void cgrp_css_free(struct cgroup_subsys_state *css) { kfree(css); } static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft) { return css->id; } static int read_priomap(struct seq_file *sf, void *v) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) seq_printf(sf, "%s %u\n", dev->name, netprio_prio(seq_css(sf), dev)); rcu_read_unlock(); return 0; } static ssize_t write_priomap(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { char devname[IFNAMSIZ + 1]; struct net_device *dev; u32 prio; int ret; if (sscanf(buf, "%"__stringify(IFNAMSIZ)"s %u", devname, &prio) != 2) return -EINVAL; dev = dev_get_by_name(&init_net, devname); if (!dev) return -ENODEV; rtnl_lock(); ret = netprio_set_prio(of_css(of), dev, prio); rtnl_unlock(); dev_put(dev); return ret ?: nbytes; } static int update_netprio(const void *v, struct file *file, unsigned n) { struct socket *sock = sock_from_file(file); if (sock) sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data, (unsigned long)v); return 0; } static void net_prio_attach(struct cgroup_taskset *tset) { struct task_struct *p; struct cgroup_subsys_state *css; cgroup_taskset_for_each(p, css, tset) { void *v = (void *)(unsigned long)css->id; task_lock(p); iterate_fd(p->files, 0, update_netprio, v); task_unlock(p); } } static struct cftype ss_files[] = { { .name = "prioidx", .read_u64 = read_prioidx, }, { .name = "ifpriomap", .seq_show = read_priomap, .write = write_priomap, }, { } /* terminate */ }; struct cgroup_subsys net_prio_cgrp_subsys = { .css_alloc = cgrp_css_alloc, .css_online = cgrp_css_online, .css_free = cgrp_css_free, .attach = net_prio_attach, .legacy_cftypes = ss_files, }; static int netprio_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = 
netdev_notifier_info_to_dev(ptr); struct netprio_map *old; /* * Note this is called with rtnl_lock held so we have update side * protection on our rcu assignments */ switch (event) { case NETDEV_UNREGISTER: old = rtnl_dereference(dev->priomap); RCU_INIT_POINTER(dev->priomap, NULL); if (old) kfree_rcu(old, rcu); break; } return NOTIFY_DONE; } static struct notifier_block netprio_device_notifier = { .notifier_call = netprio_device_event }; static int __init init_cgroup_netprio(void) { register_netdevice_notifier(&netprio_device_notifier); return 0; } subsys_initcall(init_cgroup_netprio);
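/*
 * Usage sketch (not part of this file): with the legacy (v1) cgroup
 * hierarchy this controller exposes "net_prio.prioidx" (the read-only css
 * id) and "net_prio.ifpriomap" in each cgroup directory. Writing
 * "<ifname> <prio>" to ifpriomap ends up in write_priomap() /
 * netprio_set_prio() above, and the transmit path later maps a socket's
 * stored prioidx back through the device's priomap to set skb->priority.
 * Assuming the controller is mounted at /sys/fs/cgroup/net_prio:
 *
 *	mkdir /sys/fs/cgroup/net_prio/web
 *	echo "eth0 5" > /sys/fs/cgroup/net_prio/web/net_prio.ifpriomap
 *	echo $$ > /sys/fs/cgroup/net_prio/web/tasks
 *
 * Sockets created by tasks in "web" (and already-open socket fds re-tagged
 * by net_prio_attach()) then transmit on eth0 with priority 5.
 */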
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMU_NOTIFIER_H #define _LINUX_MMU_NOTIFIER_H #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> #include <linux/mmap_lock.h> #include <linux/srcu.h> #include <linux/interval_tree.h> struct mmu_notifier_subscriptions; struct mmu_notifier; struct mmu_notifier_range; struct mmu_interval_notifier; /** * enum mmu_notifier_event - reason for the mmu notifier callback * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap() that * moves the range * * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this, like * madvise() or replacing a page by another one, ...).
* * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the range, * i.e. using the vma access permission (vm_page_prot) to update the whole range * is enough, no need to inspect changes to the CPU page table (mprotect() * syscall) * * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write flag for * pages in the range, so to mirror those changes the user must inspect the CPU * page table (from the end callback). * * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same * access flags). User should soft dirty the page in the end callback to make * sure that anyone relying on soft dirtiness catches pages that might be written * through non-CPU mappings. * * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal * that