17 17 17 17 21 21 21 17 21 21 16 17 17 17 17 17 17 17 5 17 5 5 5 1502 1505 158 21 17 17 16 17 17 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 | // SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naylor Started Coding * LAPB 002 Jonathan Naylor New timer architecture. * 2000-10-29 Henner Eisen lapb_data_indication() return status. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include <net/lapb.h> static LIST_HEAD(lapb_list); static DEFINE_RWLOCK(lapb_list_lock); /* * Free an allocated lapb control block. */ static void lapb_free_cb(struct lapb_cb *lapb) { kfree(lapb); } static __inline__ void lapb_hold(struct lapb_cb *lapb) { refcount_inc(&lapb->refcnt); } static __inline__ void lapb_put(struct lapb_cb *lapb) { if (refcount_dec_and_test(&lapb->refcnt)) lapb_free_cb(lapb); } /* * Socket removal during an interrupt is now safe. 
*/ static void __lapb_remove_cb(struct lapb_cb *lapb) { if (lapb->node.next) { list_del(&lapb->node); lapb_put(lapb); } } /* * Add a socket to the bound sockets list. */ static void __lapb_insert_cb(struct lapb_cb *lapb) { list_add(&lapb->node, &lapb_list); lapb_hold(lapb); } static struct lapb_cb *__lapb_devtostruct(struct net_device *dev) { struct lapb_cb *lapb, *use = NULL; list_for_each_entry(lapb, &lapb_list, node) { if (lapb->dev == dev) { use = lapb; break; } } if (use) lapb_hold(use); return use; } static struct lapb_cb *lapb_devtostruct(struct net_device *dev) { struct lapb_cb *rc; read_lock_bh(&lapb_list_lock); rc = __lapb_devtostruct(dev); read_unlock_bh(&lapb_list_lock); return rc; } /* * Create an empty LAPB control block. */ static struct lapb_cb *lapb_create_cb(void) { struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC); if (!lapb) goto out; skb_queue_head_init(&lapb->write_queue); skb_queue_head_init(&lapb->ack_queue); timer_setup(&lapb->t1timer, NULL, 0); timer_setup(&lapb->t2timer, NULL, 0); lapb->t1timer_running = false; lapb->t2timer_running = false; lapb->t1 = LAPB_DEFAULT_T1; lapb->t2 = LAPB_DEFAULT_T2; lapb->n2 = LAPB_DEFAULT_N2; lapb->mode = LAPB_DEFAULT_MODE; lapb->window = LAPB_DEFAULT_WINDOW; lapb->state = LAPB_STATE_0; spin_lock_init(&lapb->lock); refcount_set(&lapb->refcnt, 1); out: return lapb; } int lapb_register(struct net_device *dev, const struct lapb_register_struct *callbacks) { struct lapb_cb *lapb; int rc = LAPB_BADTOKEN; write_lock_bh(&lapb_list_lock); lapb = __lapb_devtostruct(dev); if (lapb) { lapb_put(lapb); goto out; } lapb = lapb_create_cb(); rc = LAPB_NOMEM; if (!lapb) goto out; lapb->dev = dev; lapb->callbacks = callbacks; __lapb_insert_cb(lapb); lapb_start_t1timer(lapb); rc = LAPB_OK; out: write_unlock_bh(&lapb_list_lock); return rc; } EXPORT_SYMBOL(lapb_register); int lapb_unregister(struct net_device *dev) { struct lapb_cb *lapb; int rc = LAPB_BADTOKEN; write_lock_bh(&lapb_list_lock); lapb = __lapb_devtostruct(dev); if (!lapb) goto out; lapb_put(lapb); /* Wait for other refs to "lapb" to drop */ while (refcount_read(&lapb->refcnt) > 2) usleep_range(1, 10); spin_lock_bh(&lapb->lock); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_clear_queues(lapb); spin_unlock_bh(&lapb->lock); /* Wait for running timers to stop */ timer_delete_sync(&lapb->t1timer); timer_delete_sync(&lapb->t2timer); __lapb_remove_cb(lapb); lapb_put(lapb); rc = LAPB_OK; out: write_unlock_bh(&lapb_list_lock); return rc; } EXPORT_SYMBOL(lapb_unregister); int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) { int rc = LAPB_BADTOKEN; struct lapb_cb *lapb = lapb_devtostruct(dev); if (!lapb) goto out; spin_lock_bh(&lapb->lock); parms->t1 = lapb->t1 / HZ; parms->t2 = lapb->t2 / HZ; parms->n2 = lapb->n2; parms->n2count = lapb->n2count; parms->state = lapb->state; parms->window = lapb->window; parms->mode = lapb->mode; if (!timer_pending(&lapb->t1timer)) parms->t1timer = 0; else parms->t1timer = (lapb->t1timer.expires - jiffies) / HZ; if (!timer_pending(&lapb->t2timer)) parms->t2timer = 0; else parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ; spin_unlock_bh(&lapb->lock); lapb_put(lapb); rc = LAPB_OK; out: return rc; } EXPORT_SYMBOL(lapb_getparms); int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) { int rc = LAPB_BADTOKEN; struct lapb_cb *lapb = lapb_devtostruct(dev); if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_INVALUE; if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1) goto out_put; if (lapb->state == 
LAPB_STATE_0) { if (parms->mode & LAPB_EXTENDED) { if (parms->window < 1 || parms->window > 127) goto out_put; } else { if (parms->window < 1 || parms->window > 7) goto out_put; } lapb->mode = parms->mode; lapb->window = parms->window; } lapb->t1 = parms->t1 * HZ; lapb->t2 = parms->t2 * HZ; lapb->n2 = parms->n2; rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_setparms); int lapb_connect_request(struct net_device *dev) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_OK; if (lapb->state == LAPB_STATE_1) goto out_put; rc = LAPB_CONNECTED; if (lapb->state == LAPB_STATE_3 || lapb->state == LAPB_STATE_4) goto out_put; lapb_establish_data_link(lapb); lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev); lapb->state = LAPB_STATE_1; rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_connect_request); static int __lapb_disconnect_request(struct lapb_cb *lapb) { switch (lapb->state) { case LAPB_STATE_0: return LAPB_NOTCONNECTED; case LAPB_STATE_1: lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev); lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); return LAPB_NOTCONNECTED; case LAPB_STATE_2: return LAPB_OK; } lapb_clear_queues(lapb); lapb->n2count = 0; lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_2; lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev); lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev); return LAPB_OK; } int lapb_disconnect_request(struct net_device *dev) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = __lapb_disconnect_request(lapb); spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_disconnect_request); int lapb_data_request(struct net_device *dev, struct sk_buff *skb) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_NOTCONNECTED; if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4) goto out_put; skb_queue_tail(&lapb->write_queue, skb); lapb_kick(lapb); rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_data_request); int lapb_data_received(struct net_device *dev, struct sk_buff *skb) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (lapb) { spin_lock_bh(&lapb->lock); lapb_data_input(lapb, skb); spin_unlock_bh(&lapb->lock); lapb_put(lapb); rc = LAPB_OK; } return rc; } EXPORT_SYMBOL(lapb_data_received); void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->connect_confirmation) lapb->callbacks->connect_confirmation(lapb->dev, reason); } void lapb_connect_indication(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->connect_indication) lapb->callbacks->connect_indication(lapb->dev, reason); } void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->disconnect_confirmation) lapb->callbacks->disconnect_confirmation(lapb->dev, reason); } void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->disconnect_indication) lapb->callbacks->disconnect_indication(lapb->dev, reason); } int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) { if 
(lapb->callbacks->data_indication) return lapb->callbacks->data_indication(lapb->dev, skb); kfree_skb(skb); return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ } int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) { int used = 0; if (lapb->callbacks->data_transmit) { lapb->callbacks->data_transmit(lapb->dev, skb); used = 1; } return used; } /* Handle device status changes. */ static int lapb_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct lapb_cb *lapb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type != ARPHRD_X25) return NOTIFY_DONE; lapb = lapb_devtostruct(dev); if (!lapb) return NOTIFY_DONE; spin_lock_bh(&lapb->lock); switch (event) { case NETDEV_UP: lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name); if (netif_carrier_ok(dev)) { lapb_dbg(0, "(%p): Carrier is already up: %s\n", dev, dev->name); if (lapb->mode & LAPB_DCE) { lapb_start_t1timer(lapb); } else { if (lapb->state == LAPB_STATE_0) { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } } } break; case NETDEV_GOING_DOWN: if (netif_carrier_ok(dev)) __lapb_disconnect_request(lapb); break; case NETDEV_DOWN: lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name); lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb->n2count = 0; lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); break; case NETDEV_CHANGE: if (netif_carrier_ok(dev)) { lapb_dbg(0, "(%p): Carrier detected: %s\n", dev, dev->name); if (lapb->mode & LAPB_DCE) { lapb_start_t1timer(lapb); } else { if (lapb->state == LAPB_STATE_0) { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } } } else { lapb_dbg(0, "(%p) Carrier lost: %s\n", dev, dev->name); lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb->n2count = 0; lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); } break; } spin_unlock_bh(&lapb->lock); lapb_put(lapb); return NOTIFY_DONE; } static struct notifier_block lapb_dev_notifier = { .notifier_call = lapb_device_event, }; static int __init lapb_init(void) { return register_netdevice_notifier(&lapb_dev_notifier); } static void __exit lapb_exit(void) { WARN_ON(!list_empty(&lapb_list)); unregister_netdevice_notifier(&lapb_dev_notifier); } MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The X.25 Link Access Procedure B link layer protocol"); MODULE_LICENSE("GPL"); module_init(lapb_init); module_exit(lapb_exit); |
147 52 147 103 64 51 51 105 240 10 217 10 10 10 10 10 10 10 6 10 10 55 22 55 55 55 55 55 12 54 54 54 54 53 52 53 32 32 306 32 301 300 147 64 98 50 103 103 64 299 301 301 301 241 146 300 96 301 104 1 249 249 248 146 145 145 144 143 144 144 142 74 74 70 70 70 1 70 70 25 15 233 234 234 233 233 155 144 80 79 144 153 155 155 74 155 155 155 98 30 95 241 241 240 241 241 240 241 241 241 241 241 239 240 108 2 2 109 108 106 106 107 106 8 7 8 8 8 3 3 3 3 3 3 3 3 26 22 23 26 8 8 8 8 8 8 8 8 8 26 8 9 9 9 9 8 8 1 7 8 8 3 8 8 8 9 144 145 144 145 101 101 101 101 145 145 145 144 1 143 145 107 103 103 7 144 145 145 145 143 144 94 144 145 145 145 143 145 1 145 32 32 32 32 32 32 30 30 30 30 131 181 113 113 5 64 145 142 145 144 144 143 144 143 144 8 144 132 145 145 132 145 121 142 139 135 142 2 138 104 102 45 15 13 36 29 37 29 7 7 7 117 142 144 45 14 133 27 26 144 6 15 10 10 2 8 117 117 117 117 7 116 116 116 108 107 115 107 1 1 1 117 115 89 25 25 25 25 25 25 62 63 37 95 69 37 4 3 4 10 1 8 5 9 89 8 1 8 2 8 29 2 11 6 6 88 86 89 89 85 86 62 86 1 9 10 83 83 83 23 83 3 59 59 35 25 18 25 36 59 28 28 1 2 2 7 6 20 2 71 39 39 39 3 39 35 39 29 28 72 35 37 37 34 65 13 5 4 72 65 21 21 21 21 69 10 75 2 1 2 2 82 86 62 62 89 14 13 6 6 6 3 2 1 1 1 1 1 1 1 1 1 1 10 4 9 10 6 6 6 2 4 2 2 2 2 2 2 1 2 2 2 2 2 2 2 2 2 2 1 2 2 2 2 5 5 21 6 21 21 21 21 20 7 7 20 20 6 20 20 20 20 6 2 2 6 6 6 6 21 20 21 21 21 20 20 21 21 21 6 21 21 6 21 17 17 16 3 3 14 22 22 22 20 56 1 57 1 1 1 24 24 22 23 24 25 25 24 24 68 69 59 42 65 53 41 41 35 35 35 34 27 26 29 12 23 61 61 7 61 54 22 22 7 7 58 58 54 1 57 19 57 8 57 54 12 9 9 9 1 1 8 9 52 41 41 53 53 53 53 53 7 53 61 41 78 44 78 78 1 78 78 2 2 78 78 53 53 53 53 65 14 15 65 64 19 1 47 2 2 47 6 39 44 21 76 76 4 76 76 63 63 63 76 4 3 1 75 74 78 5 5 79 54 79 70 14 79 79 64 79 79 78 79 79 73 78 78 78 18 18 78 70 78 78 78 78 78 77 78 77 78 3 78 78 78 78 78 77 5 5 5 76 76 77 77 53 77 77 77 24 77 50 76 7 76 76 79 66 54 54 55 54 79 1 79 79 15 79 79 79 78 1 79 4 77 76 75 76 76 78 79 8 8 7 58 29 52 32 12 25 37 51 2 50 31 20 20 63 11 34 31 31 31 7 7 5 50 49 50 52 50 4 1 4 3 4 4 4 7 1 7 7 7 7 7 4 4 3 3 3 7 7 7 1 66 65 66 33 66 66 66 9 65 5 5 63 63 34 28 28 12 11 1 1 1 1 1 7 7 66 63 63 35 35 63 63 62 8 62 56 56 56 8 63 62 63 28 22 22 28 7 7 3 4 4 28 28 28 171 172 172 172 163 163 6 5 4 5 4 4 4 6 6 50 229 229 13 21 15 11 10 15 13 13 10 10 12 7 6 6 1 1 6 15 11 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 
321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 
1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 
1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 
2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 
3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 
4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 | // SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/data.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/sched/mm.h> #include <linux/mpage.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/blk-crypto.h> #include <linux/swap.h> #include <linux/prefetch.h> #include <linux/uio.h> #include <linux/sched/signal.h> #include <linux/fiemap.h> #include <linux/iomap.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "iostat.h" #include <trace/events/f2fs.h> #define NUM_PREALLOC_POST_READ_CTXS 128 static struct kmem_cache *bio_post_read_ctx_cache; static struct kmem_cache *bio_entry_slab; static mempool_t *bio_post_read_ctx_pool; static struct bio_set f2fs_bioset; #define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE int __init f2fs_init_bioset(void) { return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); } void f2fs_destroy_bioset(void) { bioset_exit(&f2fs_bioset); } bool f2fs_is_cp_guaranteed(struct page *page) { struct address_space *mapping = page->mapping; struct inode *inode; struct f2fs_sb_info *sbi; if (!mapping) return false; inode = mapping->host; sbi = F2FS_I_SB(inode); if (inode->i_ino == F2FS_META_INO(sbi) || inode->i_ino == F2FS_NODE_INO(sbi) || S_ISDIR(inode->i_mode)) return true; if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) || page_private_gcing(page)) return true; return false; } static enum count_type __read_io_type(struct folio *folio) { struct address_space *mapping = folio->mapping; if (mapping) { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (inode->i_ino == F2FS_META_INO(sbi)) return F2FS_RD_META; if (inode->i_ino == F2FS_NODE_INO(sbi)) return F2FS_RD_NODE; } return F2FS_RD_DATA; } /* postprocessing steps for read bios */ enum bio_post_read_step { #ifdef CONFIG_FS_ENCRYPTION STEP_DECRYPT = BIT(0), #else STEP_DECRYPT = 0, /* compile out the decryption-related code */ #endif #ifdef CONFIG_F2FS_FS_COMPRESSION STEP_DECOMPRESS = BIT(1), #else STEP_DECOMPRESS = 0, /* compile out the decompression-related code */ #endif #ifdef CONFIG_FS_VERITY STEP_VERITY = BIT(2), #else STEP_VERITY = 0, /* compile out the verity-related code */ #endif }; struct bio_post_read_ctx { 
struct bio *bio; struct f2fs_sb_info *sbi; struct work_struct work; unsigned int enabled_steps; /* * decompression_attempted keeps track of whether * f2fs_end_read_compressed_page() has been called on the pages in the * bio that belong to a compressed cluster yet. */ bool decompression_attempted; block_t fs_blkaddr; }; /* * Update and unlock a bio's pages, and free the bio. * * This marks pages up-to-date only if there was no error in the bio (I/O error, * decryption error, or verity error), as indicated by bio->bi_status. * * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk) * aren't marked up-to-date here, as decompression is done on a per-compression- * cluster basis rather than a per-bio basis. Instead, we only must do two * things for each compressed page here: call f2fs_end_read_compressed_page() * with failed=true if an error occurred before it would have normally gotten * called (i.e., I/O error or decryption error, but *not* verity error), and * release the bio's reference to the decompress_io_ctx of the page's cluster. */ static void f2fs_finish_read_bio(struct bio *bio, bool in_task) { struct folio_iter fi; struct bio_post_read_ctx *ctx = bio->bi_private; bio_for_each_folio_all(fi, bio) { struct folio *folio = fi.folio; if (f2fs_is_compressed_page(&folio->page)) { if (ctx && !ctx->decompression_attempted) f2fs_end_read_compressed_page(&folio->page, true, 0, in_task); f2fs_put_page_dic(&folio->page, in_task); continue; } dec_page_count(F2FS_F_SB(folio), __read_io_type(folio)); folio_end_read(folio, bio->bi_status == 0); } if (ctx) mempool_free(ctx, bio_post_read_ctx_pool); bio_put(bio); } static void f2fs_verify_bio(struct work_struct *work) { struct bio_post_read_ctx *ctx = container_of(work, struct bio_post_read_ctx, work); struct bio *bio = ctx->bio; bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS); /* * fsverity_verify_bio() may call readahead() again, and while verity * will be disabled for this, decryption and/or decompression may still * be needed, resulting in another bio_post_read_ctx being allocated. * So to prevent deadlocks we need to release the current ctx to the * mempool first. This assumes that verity is the last post-read step. */ mempool_free(ctx, bio_post_read_ctx_pool); bio->bi_private = NULL; /* * Verify the bio's pages with fs-verity. Exclude compressed pages, * as those were handled separately by f2fs_end_read_compressed_page(). */ if (may_have_compressed_pages) { struct bio_vec *bv; struct bvec_iter_all iter_all; bio_for_each_segment_all(bv, bio, iter_all) { struct page *page = bv->bv_page; if (!f2fs_is_compressed_page(page) && !fsverity_verify_page(page)) { bio->bi_status = BLK_STS_IOERR; break; } } } else { fsverity_verify_bio(bio); } f2fs_finish_read_bio(bio, true); } /* * If the bio's data needs to be verified with fs-verity, then enqueue the * verity work for the bio. Otherwise finish the bio now. * * Note that to avoid deadlocks, the verity work can't be done on the * decryption/decompression workqueue. This is because verifying the data pages * can involve reading verity metadata pages from the file, and these verity * metadata pages may be encrypted and/or compressed. 
*/ static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task) { struct bio_post_read_ctx *ctx = bio->bi_private; if (ctx && (ctx->enabled_steps & STEP_VERITY)) { INIT_WORK(&ctx->work, f2fs_verify_bio); fsverity_enqueue_verify_work(&ctx->work); } else { f2fs_finish_read_bio(bio, in_task); } } /* * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last * remaining page was read by @ctx->bio. * * Note that a bio may span clusters (even a mix of compressed and uncompressed * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates * that the bio includes at least one compressed page. The actual decompression * is done on a per-cluster basis, not a per-bio basis. */ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx, bool in_task) { struct bio_vec *bv; struct bvec_iter_all iter_all; bool all_compressed = true; block_t blkaddr = ctx->fs_blkaddr; bio_for_each_segment_all(bv, ctx->bio, iter_all) { struct page *page = bv->bv_page; if (f2fs_is_compressed_page(page)) f2fs_end_read_compressed_page(page, false, blkaddr, in_task); else all_compressed = false; blkaddr++; } ctx->decompression_attempted = true; /* * Optimization: if all the bio's pages are compressed, then scheduling * the per-bio verity work is unnecessary, as verity will be fully * handled at the compression cluster level. */ if (all_compressed) ctx->enabled_steps &= ~STEP_VERITY; } static void f2fs_post_read_work(struct work_struct *work) { struct bio_post_read_ctx *ctx = container_of(work, struct bio_post_read_ctx, work); struct bio *bio = ctx->bio; if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) { f2fs_finish_read_bio(bio, true); return; } if (ctx->enabled_steps & STEP_DECOMPRESS) f2fs_handle_step_decompress(ctx, true); f2fs_verify_and_finish_bio(bio, true); } static void f2fs_read_end_io(struct bio *bio) { struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio)); struct bio_post_read_ctx *ctx; bool intask = in_task(); iostat_update_and_unbind_ctx(bio); ctx = bio->bi_private; if (time_to_inject(sbi, FAULT_READ_IO)) bio->bi_status = BLK_STS_IOERR; if (bio->bi_status) { f2fs_finish_read_bio(bio, intask); return; } if (ctx) { unsigned int enabled_steps = ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS); /* * If we have only decompression step between decompression and * decrypt, we don't need post processing for this. 
*/ if (enabled_steps == STEP_DECOMPRESS && !f2fs_low_mem_mode(sbi)) { f2fs_handle_step_decompress(ctx, intask); } else if (enabled_steps) { INIT_WORK(&ctx->work, f2fs_post_read_work); queue_work(ctx->sbi->post_read_wq, &ctx->work); return; } } f2fs_verify_and_finish_bio(bio, intask); } static void f2fs_write_end_io(struct bio *bio) { struct f2fs_sb_info *sbi; struct folio_iter fi; iostat_update_and_unbind_ctx(bio); sbi = bio->bi_private; if (time_to_inject(sbi, FAULT_WRITE_IO)) bio->bi_status = BLK_STS_IOERR; bio_for_each_folio_all(fi, bio) { struct folio *folio = fi.folio; enum count_type type; if (fscrypt_is_bounce_folio(folio)) { struct folio *io_folio = folio; folio = fscrypt_pagecache_folio(io_folio); fscrypt_free_bounce_page(&io_folio->page); } #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_is_compressed_page(&folio->page)) { f2fs_compress_write_end_io(bio, &folio->page); continue; } #endif type = WB_DATA_TYPE(&folio->page, false); if (unlikely(bio->bi_status)) { mapping_set_error(folio->mapping, -EIO); if (type == F2FS_WB_CP_DATA) f2fs_stop_checkpoint(sbi, true, STOP_CP_REASON_WRITE_FAIL); } f2fs_bug_on(sbi, folio->mapping == NODE_MAPPING(sbi) && folio->index != nid_of_node(&folio->page)); dec_page_count(sbi, type); if (f2fs_in_warm_node_list(sbi, folio)) f2fs_del_fsync_node_entry(sbi, &folio->page); clear_page_private_gcing(&folio->page); folio_end_writeback(folio); } if (!get_pages(sbi, F2FS_WB_CP_DATA) && wq_has_sleeper(&sbi->cp_wait)) wake_up(&sbi->cp_wait); bio_put(bio); } #ifdef CONFIG_BLK_DEV_ZONED static void f2fs_zone_write_end_io(struct bio *bio) { struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private; bio->bi_private = io->bi_private; complete(&io->zone_wait); f2fs_write_end_io(bio); } #endif struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, block_t blk_addr, sector_t *sector) { struct block_device *bdev = sbi->sb->s_bdev; int i; if (f2fs_is_multi_device(sbi)) { for (i = 0; i < sbi->s_ndevs; i++) { if (FDEV(i).start_blk <= blk_addr && FDEV(i).end_blk >= blk_addr) { blk_addr -= FDEV(i).start_blk; bdev = FDEV(i).bdev; break; } } } if (sector) *sector = SECTOR_FROM_BLOCK(blk_addr); return bdev; } int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr) { int i; if (!f2fs_is_multi_device(sbi)) return 0; for (i = 0; i < sbi->s_ndevs; i++) if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr) return i; return 0; } static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio) { unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0); struct folio *fio_folio = page_folio(fio->page); unsigned int fua_flag, meta_flag, io_flag; blk_opf_t op_flags = 0; if (fio->op != REQ_OP_WRITE) return 0; if (fio->type == DATA) io_flag = fio->sbi->data_io_flag; else if (fio->type == NODE) io_flag = fio->sbi->node_io_flag; else return 0; fua_flag = io_flag & temp_mask; meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask; /* * data/node io flag bits per temp: * REQ_META | REQ_FUA | * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | */ if (BIT(fio->temp) & meta_flag) op_flags |= REQ_META; if (BIT(fio->temp) & fua_flag) op_flags |= REQ_FUA; if (fio->type == DATA && F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE) op_flags |= REQ_PRIO; return op_flags; } static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) { struct f2fs_sb_info *sbi = fio->sbi; struct block_device *bdev; sector_t sector; struct bio *bio; bdev = f2fs_target_device(sbi, fio->new_blkaddr, §or); bio = bio_alloc_bioset(bdev, npages, fio->op | 
fio->op_flags | f2fs_io_flags(fio), GFP_NOIO, &f2fs_bioset); bio->bi_iter.bi_sector = sector; if (is_read_io(fio->op)) { bio->bi_end_io = f2fs_read_end_io; bio->bi_private = NULL; } else { bio->bi_end_io = f2fs_write_end_io; bio->bi_private = sbi; bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, fio->type, fio->temp); } iostat_alloc_and_bind_ctx(sbi, bio, NULL); if (fio->io_wbc) wbc_init_bio(fio->io_wbc, bio); return bio; } static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, pgoff_t first_idx, const struct f2fs_io_info *fio, gfp_t gfp_mask) { /* * The f2fs garbage collector sets ->encrypted_page when it wants to * read/write raw data without encryption. */ if (!fio || !fio->encrypted_page) fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask); } static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode, pgoff_t next_idx, const struct f2fs_io_info *fio) { /* * The f2fs garbage collector sets ->encrypted_page when it wants to * read/write raw data without encryption. */ if (fio && fio->encrypted_page) return !bio_has_crypt_ctx(bio); return fscrypt_mergeable_bio(bio, inode, next_idx); } void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { WARN_ON_ONCE(!is_read_io(bio_op(bio))); trace_f2fs_submit_read_bio(sbi->sb, type, bio); iostat_update_submit_ctx(bio, type); submit_bio(bio); } static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { WARN_ON_ONCE(is_read_io(bio_op(bio))); trace_f2fs_submit_write_bio(sbi->sb, type, bio); iostat_update_submit_ctx(bio, type); submit_bio(bio); } static void __submit_merged_bio(struct f2fs_bio_info *io) { struct f2fs_io_info *fio = &io->fio; if (!io->bio) return; if (is_read_io(fio->op)) { trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); f2fs_submit_read_bio(io->sbi, io->bio, fio->type); } else { trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); f2fs_submit_write_bio(io->sbi, io->bio, fio->type); } io->bio = NULL; } static bool __has_merged_page(struct bio *bio, struct inode *inode, struct page *page, nid_t ino) { struct bio_vec *bvec; struct bvec_iter_all iter_all; if (!bio) return false; if (!inode && !page && !ino) return true; bio_for_each_segment_all(bvec, bio, iter_all) { struct page *target = bvec->bv_page; if (fscrypt_is_bounce_page(target)) { target = fscrypt_pagecache_page(target); if (IS_ERR(target)) continue; } if (f2fs_is_compressed_page(target)) { target = f2fs_compress_control_page(target); if (IS_ERR(target)) continue; } if (inode && inode == target->mapping->host) return true; if (page && page == target) return true; if (ino && ino == ino_of_node(target)) return true; } return false; } int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi) { int i; for (i = 0; i < NR_PAGE_TYPE; i++) { int n = (i == META) ? 
1 : NR_TEMP_TYPE; int j; sbi->write_io[i] = f2fs_kmalloc(sbi, array_size(n, sizeof(struct f2fs_bio_info)), GFP_KERNEL); if (!sbi->write_io[i]) return -ENOMEM; for (j = HOT; j < n; j++) { struct f2fs_bio_info *io = &sbi->write_io[i][j]; init_f2fs_rwsem(&io->io_rwsem); io->sbi = sbi; io->bio = NULL; io->last_block_in_bio = 0; spin_lock_init(&io->io_lock); INIT_LIST_HEAD(&io->io_list); INIT_LIST_HEAD(&io->bio_list); init_f2fs_rwsem(&io->bio_list_lock); #ifdef CONFIG_BLK_DEV_ZONED init_completion(&io->zone_wait); io->zone_pending_bio = NULL; io->bi_private = NULL; #endif } } return 0; } static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type, enum temp_type temp) { enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io = sbi->write_io[btype] + temp; f2fs_down_write(&io->io_rwsem); if (!io->bio) goto unlock_out; /* change META to META_FLUSH in the checkpoint procedure */ if (type >= META_FLUSH) { io->fio.type = META_FLUSH; io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC; if (!test_opt(sbi, NOBARRIER)) io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA; } __submit_merged_bio(io); unlock_out: f2fs_up_write(&io->io_rwsem); } static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, nid_t ino, enum page_type type, bool force) { enum temp_type temp; bool ret = true; for (temp = HOT; temp < NR_TEMP_TYPE; temp++) { if (!force) { enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io = sbi->write_io[btype] + temp; f2fs_down_read(&io->io_rwsem); ret = __has_merged_page(io->bio, inode, page, ino); f2fs_up_read(&io->io_rwsem); } if (ret) __f2fs_submit_merged_write(sbi, type, temp); /* TODO: use HOT temp only for meta pages now. */ if (type >= META) break; } } void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type) { __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true); } void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, nid_t ino, enum page_type type) { __submit_merged_write_cond(sbi, inode, page, ino, type, false); } void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi) { f2fs_submit_merged_write(sbi, DATA); f2fs_submit_merged_write(sbi, NODE); f2fs_submit_merged_write(sbi, META); } /* * Fill the locked page with data located in the block address. * A caller needs to unlock the page on failure. */ int f2fs_submit_page_bio(struct f2fs_io_info *fio) { struct bio *bio; struct folio *fio_folio = page_folio(fio->page); struct folio *data_folio = fio->encrypted_page ? page_folio(fio->encrypted_page) : fio_folio; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, fio->is_por ? META_POR : (__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC_ENHANCE))) return -EFSCORRUPTED; trace_f2fs_submit_folio_bio(data_folio, fio); /* Allocate a new bio */ bio = __bio_alloc(fio, 1); f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host, fio_folio->index, fio, GFP_NOIO); bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0); if (fio->io_wbc && !is_read_io(fio->op)) wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE); inc_page_count(fio->sbi, is_read_io(fio->op) ? 
__read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false)); if (is_read_io(bio_op(bio))) f2fs_submit_read_bio(fio->sbi, bio, fio->type); else f2fs_submit_write_bio(fio->sbi, bio, fio->type); return 0; } static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio, block_t last_blkaddr, block_t cur_blkaddr) { if (unlikely(sbi->max_io_bytes && bio->bi_iter.bi_size >= sbi->max_io_bytes)) return false; if (last_blkaddr + 1 != cur_blkaddr) return false; return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL); } static bool io_type_is_mergeable(struct f2fs_bio_info *io, struct f2fs_io_info *fio) { if (io->fio.op != fio->op) return false; return io->fio.op_flags == fio->op_flags; } static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio, struct f2fs_bio_info *io, struct f2fs_io_info *fio, block_t last_blkaddr, block_t cur_blkaddr) { if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr)) return false; return io_type_is_mergeable(io, fio); } static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio, struct page *page, enum temp_type temp) { struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; struct bio_entry *be; be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL); be->bio = bio; bio_get(bio); if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) f2fs_bug_on(sbi, 1); f2fs_down_write(&io->bio_list_lock); list_add_tail(&be->list, &io->bio_list); f2fs_up_write(&io->bio_list_lock); } static void del_bio_entry(struct bio_entry *be) { list_del(&be->list); kmem_cache_free(bio_entry_slab, be); } static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) { struct f2fs_sb_info *sbi = fio->sbi; enum temp_type temp; bool found = false; int ret = -EAGAIN; for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) { struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; struct list_head *head = &io->bio_list; struct bio_entry *be; f2fs_down_write(&io->bio_list_lock); list_for_each_entry(be, head, list) { if (be->bio != *bio) continue; found = true; f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio, *fio->last_block, fio->new_blkaddr)); if (f2fs_crypt_mergeable_bio(*bio, fio->page->mapping->host, page_folio(fio->page)->index, fio) && bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) { ret = 0; break; } /* page can't be merged into bio; submit the bio */ del_bio_entry(be); f2fs_submit_write_bio(sbi, *bio, DATA); break; } f2fs_up_write(&io->bio_list_lock); } if (ret) { bio_put(*bio); *bio = NULL; } return ret; } void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, struct bio **bio, struct page *page) { enum temp_type temp; bool found = false; struct bio *target = bio ? 
*bio : NULL; f2fs_bug_on(sbi, !target && !page); for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) { struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; struct list_head *head = &io->bio_list; struct bio_entry *be; if (list_empty(head)) continue; f2fs_down_read(&io->bio_list_lock); list_for_each_entry(be, head, list) { if (target) found = (target == be->bio); else found = __has_merged_page(be->bio, NULL, page, 0); if (found) break; } f2fs_up_read(&io->bio_list_lock); if (!found) continue; found = false; f2fs_down_write(&io->bio_list_lock); list_for_each_entry(be, head, list) { if (target) found = (target == be->bio); else found = __has_merged_page(be->bio, NULL, page, 0); if (found) { target = be->bio; del_bio_entry(be); break; } } f2fs_up_write(&io->bio_list_lock); } if (found) f2fs_submit_write_bio(sbi, target, DATA); if (bio && *bio) { bio_put(*bio); *bio = NULL; } } int f2fs_merge_page_bio(struct f2fs_io_info *fio) { struct bio *bio = *fio->bio; struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; struct folio *folio = page_folio(fio->page); if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) return -EFSCORRUPTED; trace_f2fs_submit_folio_bio(page_folio(page), fio); if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, fio->new_blkaddr)) f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_VECS); f2fs_set_bio_crypt_ctx(bio, folio->mapping->host, folio->index, fio, GFP_NOIO); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { if (add_ipu_page(fio, &bio, page)) goto alloc_new; } if (fio->io_wbc) wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio)); inc_page_count(fio->sbi, WB_DATA_TYPE(page, false)); *fio->last_block = fio->new_blkaddr; *fio->bio = bio; return 0; } #ifdef CONFIG_BLK_DEV_ZONED static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr) { struct block_device *bdev = sbi->sb->s_bdev; int devi = 0; if (f2fs_is_multi_device(sbi)) { devi = f2fs_target_device_index(sbi, blkaddr); if (blkaddr < FDEV(devi).start_blk || blkaddr > FDEV(devi).end_blk) { f2fs_err(sbi, "Invalid block %x", blkaddr); return false; } blkaddr -= FDEV(devi).start_blk; bdev = FDEV(devi).bdev; } return bdev_is_zoned(bdev) && f2fs_blkz_is_seq(sbi, devi, blkaddr) && (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1); } #endif void f2fs_submit_page_write(struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; struct page *bio_page; enum count_type type; f2fs_bug_on(sbi, is_read_io(fio->op)); f2fs_down_write(&io->io_rwsem); next: #ifdef CONFIG_BLK_DEV_ZONED if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) { wait_for_completion_io(&io->zone_wait); bio_put(io->zone_pending_bio); io->zone_pending_bio = NULL; io->bi_private = NULL; } #endif if (fio->in_list) { spin_lock(&io->io_lock); if (list_empty(&io->io_list)) { spin_unlock(&io->io_lock); goto out; } fio = list_first_entry(&io->io_list, struct f2fs_io_info, list); list_del(&fio->list); spin_unlock(&io->io_lock); } verify_fio_blkaddr(fio); if (fio->encrypted_page) bio_page = fio->encrypted_page; else if (fio->compressed_page) bio_page = fio->compressed_page; else bio_page = fio->page; /* set submitted = true as a return value */ fio->submitted = 1; type = WB_DATA_TYPE(bio_page, fio->compressed_page); inc_page_count(sbi, type); if (io->bio 
&& (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, fio->new_blkaddr) || !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, page_folio(bio_page)->index, fio))) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { io->bio = __bio_alloc(fio, BIO_MAX_VECS); f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, page_folio(bio_page)->index, fio, GFP_NOIO); io->fio = *fio; } if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { __submit_merged_bio(io); goto alloc_new; } if (fio->io_wbc) wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page), PAGE_SIZE); io->last_block_in_bio = fio->new_blkaddr; trace_f2fs_submit_folio_write(page_folio(fio->page), fio); #ifdef CONFIG_BLK_DEV_ZONED if (f2fs_sb_has_blkzoned(sbi) && btype < META && is_end_zone_blkaddr(sbi, fio->new_blkaddr)) { bio_get(io->bio); reinit_completion(&io->zone_wait); io->bi_private = io->bio->bi_private; io->bio->bi_private = io; io->bio->bi_end_io = f2fs_zone_write_end_io; io->zone_pending_bio = io->bio; __submit_merged_bio(io); } #endif if (fio->in_list) goto next; out: if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || !f2fs_is_checkpoint_ready(sbi)) __submit_merged_bio(io); f2fs_up_write(&io->io_rwsem); } static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, unsigned nr_pages, blk_opf_t op_flag, pgoff_t first_idx, bool for_write) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct bio *bio; struct bio_post_read_ctx *ctx = NULL; unsigned int post_read_steps = 0; sector_t sector; struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector); bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages), REQ_OP_READ | op_flag, for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset); bio->bi_iter.bi_sector = sector; f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS); bio->bi_end_io = f2fs_read_end_io; if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= STEP_DECRYPT; if (f2fs_need_verity(inode, first_idx)) post_read_steps |= STEP_VERITY; /* * STEP_DECOMPRESS is handled specially, since a compressed file might * contain both compressed and uncompressed clusters. We'll allocate a * bio_post_read_ctx if the file is compressed, but the caller is * responsible for enabling STEP_DECOMPRESS if it's actually needed. */ if (post_read_steps || f2fs_compressed_file(inode)) { /* Due to the mempool, this never fails.
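 * mempool_alloc() with a blocking GFP mask (GFP_NOFS here) sleeps until a
 * pool element is freed rather than returning NULL, so no error path is
 * needed for ctx.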
*/ ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS); ctx->bio = bio; ctx->sbi = sbi; ctx->enabled_steps = post_read_steps; ctx->fs_blkaddr = blkaddr; ctx->decompression_attempted = false; bio->bi_private = ctx; } iostat_alloc_and_bind_ctx(sbi, bio, ctx); return bio; } /* This can handle encryption stuffs */ static int f2fs_submit_page_read(struct inode *inode, struct folio *folio, block_t blkaddr, blk_opf_t op_flags, bool for_write) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct bio *bio; bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags, folio->index, for_write); if (IS_ERR(bio)) return PTR_ERR(bio); /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, blkaddr); if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) { iostat_update_and_unbind_ctx(bio); if (bio->bi_private) mempool_free(bio->bi_private, bio_post_read_ctx_pool); bio_put(bio); return -EFAULT; } inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE); f2fs_submit_read_bio(sbi, bio, DATA); return 0; } static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) { __le32 *addr = get_dnode_addr(dn->inode, dn->node_page); dn->data_blkaddr = blkaddr; addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); } /* * Lock ordering for the change of data block address: * ->data_page * ->node_page * update block addresses in the node page */ void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) { f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); __set_data_blkaddr(dn, blkaddr); if (set_page_dirty(dn->node_page)) dn->node_changed = true; } void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) { f2fs_set_data_blkaddr(dn, blkaddr); f2fs_update_read_extent_cache(dn); } /* dn->ofs_in_node will be returned with up-to-date last block pointer */ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); int err; if (!count) return 0; if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) return -EPERM; err = inc_valid_block_count(sbi, dn->inode, &count, true); if (unlikely(err)) return err; trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, dn->ofs_in_node, count); f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); for (; count > 0; dn->ofs_in_node++) { block_t blkaddr = f2fs_data_blkaddr(dn); if (blkaddr == NULL_ADDR) { __set_data_blkaddr(dn, NEW_ADDR); count--; } } if (set_page_dirty(dn->node_page)) dn->node_changed = true; return 0; } /* Should keep dn->ofs_in_node unchanged */ int f2fs_reserve_new_block(struct dnode_of_data *dn) { unsigned int ofs_in_node = dn->ofs_in_node; int ret; ret = f2fs_reserve_new_blocks(dn, 1); dn->ofs_in_node = ofs_in_node; return ret; } int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) { bool need_put = dn->inode_page ? 
false : true; int err; err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE); if (err) return err; if (dn->data_blkaddr == NULL_ADDR) err = f2fs_reserve_new_block(dn); if (err || need_put) f2fs_put_dnode(dn); return err; } struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index, blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs) { struct address_space *mapping = inode->i_mapping; struct dnode_of_data dn; struct folio *folio; int err; folio = f2fs_grab_cache_folio(mapping, index, for_write); if (IS_ERR(folio)) return folio; if (f2fs_lookup_read_extent_cache_block(inode, index, &dn.data_blkaddr)) { if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, DATA_GENERIC_ENHANCE_READ)) { err = -EFSCORRUPTED; goto put_err; } goto got_it; } set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err) { if (err == -ENOENT && next_pgofs) *next_pgofs = f2fs_get_next_page_offset(&dn, index); goto put_err; } f2fs_put_dnode(&dn); if (unlikely(dn.data_blkaddr == NULL_ADDR)) { err = -ENOENT; if (next_pgofs) *next_pgofs = index + 1; goto put_err; } if (dn.data_blkaddr != NEW_ADDR && !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, DATA_GENERIC_ENHANCE)) { err = -EFSCORRUPTED; goto put_err; } got_it: if (folio_test_uptodate(folio)) { folio_unlock(folio); return folio; } /* * A new dentry page is allocated but not able to be written, since its * new inode page couldn't be allocated due to -ENOSPC. * In such the case, its blkaddr can be remained as NEW_ADDR. * see, f2fs_add_link -> f2fs_get_new_data_page -> * f2fs_init_inode_metadata. */ if (dn.data_blkaddr == NEW_ADDR) { folio_zero_segment(folio, 0, folio_size(folio)); if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); folio_unlock(folio); return folio; } err = f2fs_submit_page_read(inode, folio, dn.data_blkaddr, op_flags, for_write); if (err) goto put_err; return folio; put_err: f2fs_folio_put(folio, true); return ERR_PTR(err); } struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index, pgoff_t *next_pgofs) { struct address_space *mapping = inode->i_mapping; struct folio *folio; folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0); if (IS_ERR(folio)) goto read; if (folio_test_uptodate(folio)) return folio; f2fs_folio_put(folio, false); read: folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs); if (IS_ERR(folio)) return folio; if (folio_test_uptodate(folio)) return folio; folio_wait_locked(folio); if (unlikely(!folio_test_uptodate(folio))) { f2fs_folio_put(folio, false); return ERR_PTR(-EIO); } return folio; } /* * If it tries to access a hole, return an error. * Because, the callers, functions in dir.c and GC, should be able to know * whether this page exists or not. */ struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index, bool for_write) { struct address_space *mapping = inode->i_mapping; struct folio *folio; folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL); if (IS_ERR(folio)) return folio; /* wait for read completion */ folio_lock(folio); if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) { f2fs_folio_put(folio, true); return ERR_PTR(-EIO); } return folio; } /* * Caller ensures that this data page is never allocated. * A new zero-filled data page is allocated in the page cache. * * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op(). 
* Note that, ipage is set only by make_empty_dir, and if any error occur, * ipage should be released by this function. */ struct page *f2fs_get_new_data_page(struct inode *inode, struct page *ipage, pgoff_t index, bool new_i_size) { struct address_space *mapping = inode->i_mapping; struct page *page; struct dnode_of_data dn; int err; page = f2fs_grab_cache_page(mapping, index, true); if (!page) { /* * before exiting, we should make sure ipage will be released * if any error occur. */ f2fs_put_page(ipage, 1); return ERR_PTR(-ENOMEM); } set_new_dnode(&dn, inode, ipage, NULL, 0); err = f2fs_reserve_block(&dn, index); if (err) { f2fs_put_page(page, 1); return ERR_PTR(err); } if (!ipage) f2fs_put_dnode(&dn); if (PageUptodate(page)) goto got_it; if (dn.data_blkaddr == NEW_ADDR) { zero_user_segment(page, 0, PAGE_SIZE); if (!PageUptodate(page)) SetPageUptodate(page); } else { f2fs_put_page(page, 1); /* if ipage exists, blkaddr should be NEW_ADDR */ f2fs_bug_on(F2FS_I_SB(inode), ipage); page = f2fs_get_lock_data_page(inode, index, true); if (IS_ERR(page)) return page; } got_it: if (new_i_size && i_size_read(inode) < ((loff_t)(index + 1) << PAGE_SHIFT)) f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT)); return page; } static int __allocate_data_block(struct dnode_of_data *dn, int seg_type) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct f2fs_summary sum; struct node_info ni; block_t old_blkaddr; blkcnt_t count = 1; int err; if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) return -EPERM; err = f2fs_get_node_info(sbi, dn->nid, &ni, false); if (err) return err; dn->data_blkaddr = f2fs_data_blkaddr(dn); if (dn->data_blkaddr == NULL_ADDR) { err = inc_valid_block_count(sbi, dn->inode, &count, true); if (unlikely(err)) return err; } set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); old_blkaddr = dn->data_blkaddr; err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr, &sum, seg_type, NULL); if (err) return err; if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1); f2fs_update_data_blkaddr(dn, dn->data_blkaddr); return 0; } static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag) { if (flag == F2FS_GET_BLOCK_PRE_AIO) f2fs_down_read(&sbi->node_change); else f2fs_lock_op(sbi); } static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag) { if (flag == F2FS_GET_BLOCK_PRE_AIO) f2fs_up_read(&sbi->node_change); else f2fs_unlock_op(sbi); } int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); int err = 0; f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO); if (!f2fs_lookup_read_extent_cache_block(dn->inode, index, &dn->data_blkaddr)) err = f2fs_reserve_block(dn, index); f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO); return err; } static int f2fs_map_no_dnode(struct inode *inode, struct f2fs_map_blocks *map, struct dnode_of_data *dn, pgoff_t pgoff) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); /* * There is one exceptional case that read_node_page() may return * -ENOENT due to filesystem has been shutdown or cp_error, return * -EIO in that case. 
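 * Otherwise a missing dnode just means a hole: record the next page /
 * extent offsets for the caller and return 0 so the range is treated as
 * unmapped.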
*/ if (map->m_may_create && (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi))) return -EIO; if (map->m_next_pgofs) *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff); if (map->m_next_extent) *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff); return 0; } static bool f2fs_map_blocks_cached(struct inode *inode, struct f2fs_map_blocks *map, int flag) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int maxblocks = map->m_len; pgoff_t pgoff = (pgoff_t)map->m_lblk; struct extent_info ei = {}; if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei)) return false; map->m_pblk = ei.blk + pgoff - ei.fofs; map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff); map->m_flags = F2FS_MAP_MAPPED; if (map->m_next_extent) *map->m_next_extent = pgoff + map->m_len; /* for hardware encryption, but to avoid potential issue in future */ if (flag == F2FS_GET_BLOCK_DIO) f2fs_wait_on_block_writeback_range(inode, map->m_pblk, map->m_len); if (f2fs_allow_multi_device_dio(sbi, flag)) { int bidx = f2fs_target_device_index(sbi, map->m_pblk); struct f2fs_dev_info *dev = &sbi->devs[bidx]; map->m_bdev = dev->bdev; map->m_pblk -= dev->start_blk; map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk); } else { map->m_bdev = inode->i_sb->s_bdev; } return true; } static bool map_is_mergeable(struct f2fs_sb_info *sbi, struct f2fs_map_blocks *map, block_t blkaddr, int flag, int bidx, int ofs) { if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev) return false; if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) return true; if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) return true; if (flag == F2FS_GET_BLOCK_PRE_DIO) return true; if (flag == F2FS_GET_BLOCK_DIO && map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR) return true; return false; } /* * f2fs_map_blocks() tries to find or build mapping relationship which * maps continuous logical blocks to physical blocks, and return such * info via f2fs_map_blocks structure. */ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) { unsigned int maxblocks = map->m_len; struct dnode_of_data dn; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int mode = map->m_may_create ? 
ALLOC_NODE : LOOKUP_NODE; pgoff_t pgofs, end_offset, end; int err = 0, ofs = 1; unsigned int ofs_in_node, last_ofs_in_node; blkcnt_t prealloc; block_t blkaddr; unsigned int start_pgofs; int bidx = 0; bool is_hole; if (!maxblocks) return 0; if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag)) goto out; map->m_bdev = inode->i_sb->s_bdev; map->m_multidev_dio = f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag); map->m_len = 0; map->m_flags = 0; /* it only supports block size == page size */ pgofs = (pgoff_t)map->m_lblk; end = pgofs + maxblocks; next_dnode: if (map->m_may_create) f2fs_map_lock(sbi, flag); /* When reading holes, we need its node page */ set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, pgofs, mode); if (err) { if (flag == F2FS_GET_BLOCK_BMAP) map->m_pblk = 0; if (err == -ENOENT) err = f2fs_map_no_dnode(inode, map, &dn, pgofs); goto unlock_out; } start_pgofs = pgofs; prealloc = 0; last_ofs_in_node = ofs_in_node = dn.ofs_in_node; end_offset = ADDRS_PER_PAGE(dn.node_page, inode); next_block: blkaddr = f2fs_data_blkaddr(&dn); is_hole = !__is_valid_data_blkaddr(blkaddr); if (!is_hole && !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { err = -EFSCORRUPTED; goto sync_out; } /* use out-place-update for direct IO under LFS mode */ if (map->m_may_create && (is_hole || (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && !f2fs_is_pinned_file(inode)))) { if (unlikely(f2fs_cp_error(sbi))) { err = -EIO; goto sync_out; } switch (flag) { case F2FS_GET_BLOCK_PRE_AIO: if (blkaddr == NULL_ADDR) { prealloc++; last_ofs_in_node = dn.ofs_in_node; } break; case F2FS_GET_BLOCK_PRE_DIO: case F2FS_GET_BLOCK_DIO: err = __allocate_data_block(&dn, map->m_seg_type); if (err) goto sync_out; if (flag == F2FS_GET_BLOCK_PRE_DIO) file_need_truncate(inode); set_inode_flag(inode, FI_APPEND_WRITE); break; default: WARN_ON_ONCE(1); err = -EIO; goto sync_out; } blkaddr = dn.data_blkaddr; if (is_hole) map->m_flags |= F2FS_MAP_NEW; } else if (is_hole) { if (f2fs_compressed_file(inode) && f2fs_sanity_check_cluster(&dn)) { err = -EFSCORRUPTED; f2fs_handle_error(sbi, ERROR_CORRUPTED_CLUSTER); goto sync_out; } switch (flag) { case F2FS_GET_BLOCK_PRECACHE: goto sync_out; case F2FS_GET_BLOCK_BMAP: map->m_pblk = 0; goto sync_out; case F2FS_GET_BLOCK_FIEMAP: if (blkaddr == NULL_ADDR) { if (map->m_next_pgofs) *map->m_next_pgofs = pgofs + 1; goto sync_out; } break; case F2FS_GET_BLOCK_DIO: if (map->m_next_pgofs) *map->m_next_pgofs = pgofs + 1; break; default: /* for defragment case */ if (map->m_next_pgofs) *map->m_next_pgofs = pgofs + 1; goto sync_out; } } if (flag == F2FS_GET_BLOCK_PRE_AIO) goto skip; if (map->m_multidev_dio) bidx = f2fs_target_device_index(sbi, blkaddr); if (map->m_len == 0) { /* reserved delalloc block should be mapped for fiemap. */ if (blkaddr == NEW_ADDR) map->m_flags |= F2FS_MAP_DELALLOC; /* DIO READ and hole case, should not map the blocks. 
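 * i.e. only set F2FS_MAP_MAPPED when this is not a read-only DIO lookup
 * hitting a hole, so the caller reports a hole rather than a mapping.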
*/ if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create)) map->m_flags |= F2FS_MAP_MAPPED; map->m_pblk = blkaddr; map->m_len = 1; if (map->m_multidev_dio) map->m_bdev = FDEV(bidx).bdev; } else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) { ofs++; map->m_len++; } else { goto sync_out; } skip: dn.ofs_in_node++; pgofs++; /* preallocate blocks in batch for one dnode page */ if (flag == F2FS_GET_BLOCK_PRE_AIO && (pgofs == end || dn.ofs_in_node == end_offset)) { dn.ofs_in_node = ofs_in_node; err = f2fs_reserve_new_blocks(&dn, prealloc); if (err) goto sync_out; map->m_len += dn.ofs_in_node - ofs_in_node; if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) { err = -ENOSPC; goto sync_out; } dn.ofs_in_node = end_offset; } if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && map->m_may_create) { /* the next block to be allocated may not be contiguous. */ if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) == CAP_BLKS_PER_SEC(sbi) - 1) goto sync_out; } if (pgofs >= end) goto sync_out; else if (dn.ofs_in_node < end_offset) goto next_block; if (flag == F2FS_GET_BLOCK_PRECACHE) { if (map->m_flags & F2FS_MAP_MAPPED) { unsigned int ofs = start_pgofs - map->m_lblk; f2fs_update_read_extent_cache_range(&dn, start_pgofs, map->m_pblk + ofs, map->m_len - ofs); } } f2fs_put_dnode(&dn); if (map->m_may_create) { f2fs_map_unlock(sbi, flag); f2fs_balance_fs(sbi, dn.node_changed); } goto next_dnode; sync_out: if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) { /* * for hardware encryption, but to avoid potential issue * in future */ f2fs_wait_on_block_writeback_range(inode, map->m_pblk, map->m_len); if (map->m_multidev_dio) { block_t blk_addr = map->m_pblk; bidx = f2fs_target_device_index(sbi, map->m_pblk); map->m_bdev = FDEV(bidx).bdev; map->m_pblk -= FDEV(bidx).start_blk; if (map->m_may_create) f2fs_update_device_state(sbi, inode->i_ino, blk_addr, map->m_len); f2fs_bug_on(sbi, blk_addr + map->m_len > FDEV(bidx).end_blk + 1); } } if (flag == F2FS_GET_BLOCK_PRECACHE) { if (map->m_flags & F2FS_MAP_MAPPED) { unsigned int ofs = start_pgofs - map->m_lblk; f2fs_update_read_extent_cache_range(&dn, start_pgofs, map->m_pblk + ofs, map->m_len - ofs); } if (map->m_next_extent) *map->m_next_extent = pgofs + 1; } f2fs_put_dnode(&dn); unlock_out: if (map->m_may_create) { f2fs_map_unlock(sbi, flag); f2fs_balance_fs(sbi, dn.node_changed); } out: trace_f2fs_map_blocks(inode, map, flag, err); return err; } bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len) { struct f2fs_map_blocks map; block_t last_lblk; int err; if (pos + len > i_size_read(inode)) return false; map.m_lblk = F2FS_BYTES_TO_BLK(pos); map.m_next_pgofs = NULL; map.m_next_extent = NULL; map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; last_lblk = F2FS_BLK_ALIGN(pos + len); while (map.m_lblk < last_lblk) { map.m_len = last_lblk - map.m_lblk; err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT); if (err || map.m_len == 0) return false; map.m_lblk += map.m_len; } return true; } static int f2fs_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *page; struct node_info ni; __u64 phys = 0, len; __u32 flags; nid_t xnid = F2FS_I(inode)->i_xattr_nid; int err = 0; if (f2fs_has_inline_xattr(inode)) { int offset; page = f2fs_grab_cache_page(NODE_MAPPING(sbi), inode->i_ino, false); if (!page) return -ENOMEM; err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false); if (err) { f2fs_put_page(page, 1); return err; } phys = 
F2FS_BLK_TO_BYTES(ni.blk_addr); offset = offsetof(struct f2fs_inode, i_addr) + sizeof(__le32) * (DEF_ADDRS_PER_INODE - get_inline_xattr_addrs(inode)); phys += offset; len = inline_xattr_size(inode); f2fs_put_page(page, 1); flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED; if (!xnid) flags |= FIEMAP_EXTENT_LAST; err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); trace_f2fs_fiemap(inode, 0, phys, len, flags, err); if (err) return err; } if (xnid) { page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false); if (!page) return -ENOMEM; err = f2fs_get_node_info(sbi, xnid, &ni, false); if (err) { f2fs_put_page(page, 1); return err; } phys = F2FS_BLK_TO_BYTES(ni.blk_addr); len = inode->i_sb->s_blocksize; f2fs_put_page(page, 1); flags = FIEMAP_EXTENT_LAST; } if (phys) { err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); trace_f2fs_fiemap(inode, 0, phys, len, flags, err); } return (err < 0 ? err : 0); } int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { struct f2fs_map_blocks map; sector_t start_blk, last_blk, blk_len, max_len; pgoff_t next_pgofs; u64 logical = 0, phys = 0, size = 0; u32 flags = 0; int ret = 0; bool compr_cluster = false, compr_appended; unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; unsigned int count_in_cluster = 0; loff_t maxbytes; if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { ret = f2fs_precache_extents(inode); if (ret) return ret; } ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR); if (ret) return ret; inode_lock_shared(inode); maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); if (start > maxbytes) { ret = -EFBIG; goto out; } if (len > maxbytes || (maxbytes - len) < start) len = maxbytes - start; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { ret = f2fs_xattr_fiemap(inode, fieinfo); goto out; } if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) { ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len); if (ret != -EAGAIN) goto out; } start_blk = F2FS_BYTES_TO_BLK(start); last_blk = F2FS_BYTES_TO_BLK(start + len - 1); blk_len = last_blk - start_blk + 1; max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk; next: memset(&map, 0, sizeof(map)); map.m_lblk = start_blk; map.m_len = blk_len; map.m_next_pgofs = &next_pgofs; map.m_seg_type = NO_CHECK_TYPE; if (compr_cluster) { map.m_lblk += 1; map.m_len = cluster_size - count_in_cluster; } ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP); if (ret) goto out; /* HOLE */ if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) { start_blk = next_pgofs; if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes) goto prep_next; flags |= FIEMAP_EXTENT_LAST; } /* * current extent may cross boundary of inquiry, increase len to * requery. 
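 * When the mapped extent ends exactly at last_blk, widen blk_len up to
 * max_len and map again, so the extent is not cut short at the queried
 * boundary.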
*/ if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) && map.m_lblk + map.m_len - 1 == last_blk && blk_len != max_len) { blk_len = max_len; goto next; } compr_appended = false; /* In a case of compressed cluster, append this to the last extent */ if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) || !(map.m_flags & F2FS_MAP_FLAGS))) { compr_appended = true; goto skip_fill; } if (size) { flags |= FIEMAP_EXTENT_MERGED; if (IS_ENCRYPTED(inode)) flags |= FIEMAP_EXTENT_DATA_ENCRYPTED; ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); trace_f2fs_fiemap(inode, logical, phys, size, flags, ret); if (ret) goto out; size = 0; } if (start_blk > last_blk) goto out; skip_fill: if (map.m_pblk == COMPRESS_ADDR) { compr_cluster = true; count_in_cluster = 1; } else if (compr_appended) { unsigned int appended_blks = cluster_size - count_in_cluster + 1; size += F2FS_BLK_TO_BYTES(appended_blks); start_blk += appended_blks; compr_cluster = false; } else { logical = F2FS_BLK_TO_BYTES(start_blk); phys = __is_valid_data_blkaddr(map.m_pblk) ? F2FS_BLK_TO_BYTES(map.m_pblk) : 0; size = F2FS_BLK_TO_BYTES(map.m_len); flags = 0; if (compr_cluster) { flags = FIEMAP_EXTENT_ENCODED; count_in_cluster += map.m_len; if (count_in_cluster == cluster_size) { compr_cluster = false; size += F2FS_BLKSIZE; } } else if (map.m_flags & F2FS_MAP_DELALLOC) { flags = FIEMAP_EXTENT_UNWRITTEN; } start_blk += F2FS_BYTES_TO_BLK(size); } prep_next: cond_resched(); if (fatal_signal_pending(current)) ret = -EINTR; else goto next; out: if (ret == 1) ret = 0; inode_unlock_shared(inode); return ret; } static inline loff_t f2fs_readpage_limit(struct inode *inode) { if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) return F2FS_BLK_TO_BYTES(max_file_blocks(inode)); return i_size_read(inode); } static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac) { return rac ? REQ_RAHEAD : 0; } static int f2fs_read_single_page(struct inode *inode, struct folio *folio, unsigned nr_pages, struct f2fs_map_blocks *map, struct bio **bio_ret, sector_t *last_block_in_bio, struct readahead_control *rac) { struct bio *bio = *bio_ret; const unsigned int blocksize = F2FS_BLKSIZE; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; sector_t block_nr; pgoff_t index = folio_index(folio); int ret = 0; block_in_file = (sector_t)index; last_block = block_in_file + nr_pages; last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) + blocksize - 1); if (last_block > last_block_in_file) last_block = last_block_in_file; /* just zeroing out page which is beyond EOF */ if (block_in_file >= last_block) goto zero_out; /* * Map blocks using the previous result first. */ if ((map->m_flags & F2FS_MAP_MAPPED) && block_in_file > map->m_lblk && block_in_file < (map->m_lblk + map->m_len)) goto got_it; /* * Then do more f2fs_map_blocks() calls until we are * done with this page. 
*/ map->m_lblk = block_in_file; map->m_len = last_block - block_in_file; ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT); if (ret) goto out; got_it: if ((map->m_flags & F2FS_MAP_MAPPED)) { block_nr = map->m_pblk + block_in_file - map->m_lblk; folio_set_mappedtodisk(folio); if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, DATA_GENERIC_ENHANCE_READ)) { ret = -EFSCORRUPTED; goto out; } } else { zero_out: folio_zero_segment(folio, 0, folio_size(folio)); if (f2fs_need_verity(inode, index) && !fsverity_verify_folio(folio)) { ret = -EIO; goto out; } if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); folio_unlock(folio); goto out; } /* * This page will go to BIO. Do we need to send this * BIO off first? */ if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, *last_block_in_bio, block_nr) || !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) { submit_and_realloc: f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; } if (bio == NULL) { bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, f2fs_ra_op_flags(rac), index, false); if (IS_ERR(bio)) { ret = PTR_ERR(bio); bio = NULL; goto out; } } /* * If the page is under writeback, we need to wait for * its completion to see the correct decrypted data. */ f2fs_wait_on_block_writeback(inode, block_nr); if (!bio_add_folio(bio, folio, blocksize, 0)) goto submit_and_realloc; inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO, F2FS_BLKSIZE); *last_block_in_bio = block_nr; out: *bio_ret = bio; return ret; } #ifdef CONFIG_F2FS_FS_COMPRESSION int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, unsigned nr_pages, sector_t *last_block_in_bio, struct readahead_control *rac, bool for_write) { struct dnode_of_data dn; struct inode *inode = cc->inode; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct bio *bio = *bio_ret; unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size; sector_t last_block_in_file; const unsigned int blocksize = F2FS_BLKSIZE; struct decompress_io_ctx *dic = NULL; struct extent_info ei = {}; bool from_dnode = true; int i; int ret = 0; if (unlikely(f2fs_cp_error(sbi))) { ret = -EIO; from_dnode = false; goto out_put_dnode; } f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc)); last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) + blocksize - 1); /* get rid of pages beyond EOF */ for (i = 0; i < cc->cluster_size; i++) { struct page *page = cc->rpages[i]; struct folio *folio; if (!page) continue; folio = page_folio(page); if ((sector_t)folio->index >= last_block_in_file) { folio_zero_segment(folio, 0, folio_size(folio)); if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); } else if (!folio_test_uptodate(folio)) { continue; } folio_unlock(folio); if (for_write) folio_put(folio); cc->rpages[i] = NULL; cc->nr_rpages--; } /* we are done since all pages are beyond EOF */ if (f2fs_cluster_is_empty(cc)) goto out; if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei)) from_dnode = false; if (!from_dnode) goto skip_reading_dnode; set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); if (ret) goto out; f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR); skip_reading_dnode: for (i = 1; i < cc->cluster_size; i++) { block_t blkaddr; blkaddr = from_dnode ? 
data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i) : ei.blk + i - 1; if (!__is_valid_data_blkaddr(blkaddr)) break; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) { ret = -EFAULT; goto out_put_dnode; } cc->nr_cpages++; if (!from_dnode && i >= ei.c_len) break; } /* nothing to decompress */ if (cc->nr_cpages == 0) { ret = 0; goto out_put_dnode; } dic = f2fs_alloc_dic(cc); if (IS_ERR(dic)) { ret = PTR_ERR(dic); goto out_put_dnode; } for (i = 0; i < cc->nr_cpages; i++) { struct folio *folio = page_folio(dic->cpages[i]); block_t blkaddr; struct bio_post_read_ctx *ctx; blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1) : ei.blk + i; f2fs_wait_on_block_writeback(inode, blkaddr); if (f2fs_load_compressed_page(sbi, folio_page(folio, 0), blkaddr)) { if (atomic_dec_and_test(&dic->remaining_pages)) { f2fs_decompress_cluster(dic, true); break; } continue; } if (bio && (!page_is_mergeable(sbi, bio, *last_block_in_bio, blkaddr) || !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) { submit_and_realloc: f2fs_submit_read_bio(sbi, bio, DATA); bio = NULL; } if (!bio) { bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages, f2fs_ra_op_flags(rac), folio->index, for_write); if (IS_ERR(bio)) { ret = PTR_ERR(bio); f2fs_decompress_end_io(dic, ret, true); f2fs_put_dnode(&dn); *bio_ret = NULL; return ret; } } if (!bio_add_folio(bio, folio, blocksize, 0)) goto submit_and_realloc; ctx = get_post_read_ctx(bio); ctx->enabled_steps |= STEP_DECOMPRESS; refcount_inc(&dic->refcnt); inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); *last_block_in_bio = blkaddr; } if (from_dnode) f2fs_put_dnode(&dn); *bio_ret = bio; return 0; out_put_dnode: if (from_dnode) f2fs_put_dnode(&dn); out: for (i = 0; i < cc->cluster_size; i++) { if (cc->rpages[i]) { ClearPageUptodate(cc->rpages[i]); unlock_page(cc->rpages[i]); } } *bio_ret = bio; return ret; } #endif /* * This function was originally taken from fs/mpage.c, and customized for f2fs. * Major change was from block_size == page_size in f2fs by default. */ static int f2fs_mpage_readpages(struct inode *inode, struct readahead_control *rac, struct folio *folio) { struct bio *bio = NULL; sector_t last_block_in_bio = 0; struct f2fs_map_blocks map; #ifdef CONFIG_F2FS_FS_COMPRESSION struct compress_ctx cc = { .inode = inode, .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, .cluster_size = F2FS_I(inode)->i_cluster_size, .cluster_idx = NULL_CLUSTER, .rpages = NULL, .cpages = NULL, .nr_rpages = 0, .nr_cpages = 0, }; pgoff_t nc_cluster_idx = NULL_CLUSTER; pgoff_t index; #endif unsigned nr_pages = rac ? 
readahead_count(rac) : 1; unsigned max_nr_pages = nr_pages; int ret = 0; map.m_pblk = 0; map.m_lblk = 0; map.m_len = 0; map.m_flags = 0; map.m_next_pgofs = NULL; map.m_next_extent = NULL; map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; for (; nr_pages; nr_pages--) { if (rac) { folio = readahead_folio(rac); prefetchw(&folio->flags); } #ifdef CONFIG_F2FS_FS_COMPRESSION index = folio_index(folio); if (!f2fs_compressed_file(inode)) goto read_single_page; /* there are remained compressed pages, submit them */ if (!f2fs_cluster_can_merge_page(&cc, index)) { ret = f2fs_read_multi_pages(&cc, &bio, max_nr_pages, &last_block_in_bio, rac, false); f2fs_destroy_compress_ctx(&cc, false); if (ret) goto set_error_page; } if (cc.cluster_idx == NULL_CLUSTER) { if (nc_cluster_idx == index >> cc.log_cluster_size) goto read_single_page; ret = f2fs_is_compressed_cluster(inode, index); if (ret < 0) goto set_error_page; else if (!ret) { nc_cluster_idx = index >> cc.log_cluster_size; goto read_single_page; } nc_cluster_idx = NULL_CLUSTER; } ret = f2fs_init_compress_ctx(&cc); if (ret) goto set_error_page; f2fs_compress_ctx_add_page(&cc, folio); goto next_page; read_single_page: #endif ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map, &bio, &last_block_in_bio, rac); if (ret) { #ifdef CONFIG_F2FS_FS_COMPRESSION set_error_page: #endif folio_zero_segment(folio, 0, folio_size(folio)); folio_unlock(folio); } #ifdef CONFIG_F2FS_FS_COMPRESSION next_page: #endif #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_compressed_file(inode)) { /* last page */ if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) { ret = f2fs_read_multi_pages(&cc, &bio, max_nr_pages, &last_block_in_bio, rac, false); f2fs_destroy_compress_ctx(&cc, false); } } #endif } if (bio) f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); return ret; } static int f2fs_read_data_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; int ret = -EAGAIN; trace_f2fs_readpage(folio, DATA); if (!f2fs_is_compress_backend_ready(inode)) { folio_unlock(folio); return -EOPNOTSUPP; } /* If the file has inline data, try to read it directly */ if (f2fs_has_inline_data(inode)) ret = f2fs_read_inline_data(inode, folio); if (ret == -EAGAIN) ret = f2fs_mpage_readpages(inode, NULL, folio); return ret; } static void f2fs_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac)); if (!f2fs_is_compress_backend_ready(inode)) return; /* If the file has inline data, skip readahead */ if (f2fs_has_inline_data(inode)) return; f2fs_mpage_readpages(inode, rac, NULL); } int f2fs_encrypt_one_page(struct f2fs_io_info *fio) { struct inode *inode = fio->page->mapping->host; struct page *mpage, *page; gfp_t gfp_flags = GFP_NOFS; if (!f2fs_encrypted_file(inode)) return 0; page = fio->compressed_page ? 
fio->compressed_page : fio->page; if (fscrypt_inode_uses_inline_crypto(inode)) return 0; retry_encrypt: fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page), PAGE_SIZE, 0, gfp_flags); if (IS_ERR(fio->encrypted_page)) { /* flush pending IOs and wait for a while in the ENOMEM case */ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { f2fs_flush_merged_writes(fio->sbi); memalloc_retry_wait(GFP_NOFS); gfp_flags |= __GFP_NOFAIL; goto retry_encrypt; } return PTR_ERR(fio->encrypted_page); } mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr); if (mpage) { if (PageUptodate(mpage)) memcpy(page_address(mpage), page_address(fio->encrypted_page), PAGE_SIZE); f2fs_put_page(mpage, 1); } return 0; } static inline bool check_inplace_update_policy(struct inode *inode, struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) && is_inode_flag_set(inode, FI_OPU_WRITE)) return false; if (IS_F2FS_IPU_FORCE(sbi)) return true; if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi)) return true; if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util) return true; if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util) return true; /* * IPU for rewrite async pages */ if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE && !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode)) return true; /* this is only set during fdatasync */ if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU)) return true; if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) && !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) return true; return false; } bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio) { /* swap file is migrating in aligned write mode */ if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) return false; if (f2fs_is_pinned_file(inode)) return true; /* if this is cold file, we should overwrite to avoid fragmentation */ if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE)) return true; return check_inplace_update_policy(inode, fio); } bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); /* The below cases were checked when setting it. 
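 * A pinned file must keep its on-disk block addresses stable, so it is
 * never written out of place.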
*/ if (f2fs_is_pinned_file(inode)) return false; if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK)) return true; if (f2fs_lfs_mode(sbi)) return true; if (S_ISDIR(inode->i_mode)) return true; if (IS_NOQUOTA(inode)) return true; if (f2fs_used_in_atomic_write(inode)) return true; /* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */ if (f2fs_compressed_file(inode) && F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER && is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) return true; /* swap file is migrating in aligned write mode */ if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) return true; if (is_inode_flag_set(inode, FI_OPU_WRITE)) return true; if (fio) { if (page_private_gcing(fio->page)) return true; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) return true; } return false; } static inline bool need_inplace_update(struct f2fs_io_info *fio) { struct inode *inode = fio->page->mapping->host; if (f2fs_should_update_outplace(inode, fio)) return false; return f2fs_should_update_inplace(inode, fio); } int f2fs_do_write_data_page(struct f2fs_io_info *fio) { struct folio *folio = page_folio(fio->page); struct inode *inode = folio->mapping->host; struct dnode_of_data dn; struct node_info ni; bool ipu_force = false; bool atomic_commit; int err = 0; /* Use COW inode to make dnode_of_data for atomic write */ atomic_commit = f2fs_is_atomic_file(inode) && page_private_atomic(folio_page(folio, 0)); if (atomic_commit) set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); else set_new_dnode(&dn, inode, NULL, NULL, 0); if (need_inplace_update(fio) && f2fs_lookup_read_extent_cache_block(inode, folio->index, &fio->old_blkaddr)) { if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, DATA_GENERIC_ENHANCE)) return -EFSCORRUPTED; ipu_force = true; fio->need_lock = LOCK_DONE; goto got_it; } /* Deadlock due to between page->lock and f2fs_lock_op */ if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) return -EAGAIN; err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE); if (err) goto out; fio->old_blkaddr = dn.data_blkaddr; /* This page is already truncated */ if (fio->old_blkaddr == NULL_ADDR) { folio_clear_uptodate(folio); clear_page_private_gcing(folio_page(folio, 0)); goto out_writepage; } got_it: if (__is_valid_data_blkaddr(fio->old_blkaddr) && !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, DATA_GENERIC_ENHANCE)) { err = -EFSCORRUPTED; goto out_writepage; } /* wait for GCed page writeback via META_MAPPING */ if (fio->meta_gc) f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); /* * If current allocation needs SSR, * it had better in-place writes for updated data. 
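 * IPU path: rewrite the block at fio->old_blkaddr directly instead of
 * allocating a new address through the out-of-place (OPU) path below.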
*/ if (ipu_force || (__is_valid_data_blkaddr(fio->old_blkaddr) && need_inplace_update(fio))) { err = f2fs_encrypt_one_page(fio); if (err) goto out_writepage; folio_start_writeback(folio); f2fs_put_dnode(&dn); if (fio->need_lock == LOCK_REQ) f2fs_unlock_op(fio->sbi); err = f2fs_inplace_write_data(fio); if (err) { if (fscrypt_inode_uses_fs_layer_crypto(inode)) fscrypt_finalize_bounce_page(&fio->encrypted_page); folio_end_writeback(folio); } else { set_inode_flag(inode, FI_UPDATE_WRITE); } trace_f2fs_do_write_data_page(folio, IPU); return err; } if (fio->need_lock == LOCK_RETRY) { if (!f2fs_trylock_op(fio->sbi)) { err = -EAGAIN; goto out_writepage; } fio->need_lock = LOCK_REQ; } err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false); if (err) goto out_writepage; fio->version = ni.version; err = f2fs_encrypt_one_page(fio); if (err) goto out_writepage; folio_start_writeback(folio); if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR) f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false); /* LFS mode write path */ f2fs_outplace_write_data(&dn, fio); trace_f2fs_do_write_data_page(folio, OPU); set_inode_flag(inode, FI_APPEND_WRITE); if (atomic_commit) clear_page_private_atomic(folio_page(folio, 0)); out_writepage: f2fs_put_dnode(&dn); out: if (fio->need_lock == LOCK_REQ) f2fs_unlock_op(fio->sbi); return err; } int f2fs_write_single_data_page(struct folio *folio, int *submitted, struct bio **bio, sector_t *last_block, struct writeback_control *wbc, enum iostat_type io_type, int compr_blocks, bool allow_balance) { struct inode *inode = folio->mapping->host; struct page *page = folio_page(folio, 0); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); loff_t i_size = i_size_read(inode); const pgoff_t end_index = ((unsigned long long)i_size) >> PAGE_SHIFT; loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT; unsigned offset = 0; bool need_balance_fs = false; bool quota_inode = IS_NOQUOTA(inode); int err = 0; struct f2fs_io_info fio = { .sbi = sbi, .ino = inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), .old_blkaddr = NULL_ADDR, .page = page, .encrypted_page = NULL, .submitted = 0, .compr_blocks = compr_blocks, .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY, .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0, .io_type = io_type, .io_wbc = wbc, .bio = bio, .last_block = last_block, }; trace_f2fs_writepage(folio, DATA); /* we should bypass data pages to proceed the kworker jobs */ if (unlikely(f2fs_cp_error(sbi))) { mapping_set_error(folio->mapping, -EIO); /* * don't drop any dirty dentry pages for keeping lastest * directory structure. */ if (S_ISDIR(inode->i_mode) && !is_sbi_flag_set(sbi, SBI_IS_CLOSE)) goto redirty_out; /* keep data pages in remount-ro mode */ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) goto redirty_out; goto out; } if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (folio->index < end_index || f2fs_verity_in_progress(inode) || compr_blocks) goto write; /* * If the offset is out-of-range of file size, * this page does not have to be written to disk. */ offset = i_size & (PAGE_SIZE - 1); if ((folio->index >= end_index + 1) || !offset) goto out; folio_zero_segment(folio, offset, folio_size(folio)); write: /* Dentry/quota blocks are controlled by checkpoint */ if (S_ISDIR(inode->i_mode) || quota_inode) { /* * We need to wait for node_write to avoid block allocation during * checkpoint. This can only happen to quota writes which can cause * the below discard race condition. 
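 * Taking sbi->node_write for read below serializes the quota write
 * against checkpoint, which takes node_write for write.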
*/ if (quota_inode) f2fs_down_read(&sbi->node_write); fio.need_lock = LOCK_DONE; err = f2fs_do_write_data_page(&fio); if (quota_inode) f2fs_up_read(&sbi->node_write); goto done; } if (!wbc->for_reclaim) need_balance_fs = true; else if (has_not_enough_free_secs(sbi, 0, 0)) goto redirty_out; else set_inode_flag(inode, FI_HOT_DATA); err = -EAGAIN; if (f2fs_has_inline_data(inode)) { err = f2fs_write_inline_data(inode, folio); if (!err) goto out; } if (err == -EAGAIN) { err = f2fs_do_write_data_page(&fio); if (err == -EAGAIN) { f2fs_bug_on(sbi, compr_blocks); fio.need_lock = LOCK_REQ; err = f2fs_do_write_data_page(&fio); } } if (err) { file_set_keep_isize(inode); } else { spin_lock(&F2FS_I(inode)->i_size_lock); if (F2FS_I(inode)->last_disk_size < psize) F2FS_I(inode)->last_disk_size = psize; spin_unlock(&F2FS_I(inode)->i_size_lock); } done: if (err && err != -ENOENT) goto redirty_out; out: inode_dec_dirty_pages(inode); if (err) { folio_clear_uptodate(folio); clear_page_private_gcing(page); } if (wbc->for_reclaim) { f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA); clear_inode_flag(inode, FI_HOT_DATA); f2fs_remove_dirty_inode(inode); submitted = NULL; } folio_unlock(folio); if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && !F2FS_I(inode)->wb_task && allow_balance) f2fs_balance_fs(sbi, need_balance_fs); if (unlikely(f2fs_cp_error(sbi))) { f2fs_submit_merged_write(sbi, DATA); if (bio && *bio) f2fs_submit_merged_ipu_write(sbi, bio, NULL); submitted = NULL; } if (submitted) *submitted = fio.submitted; return 0; redirty_out: folio_redirty_for_writepage(wbc, folio); /* * pageout() in MM translates EAGAIN, so calls handle_write_error() * -> mapping_set_error() -> set_bit(AS_EIO, ...). * file_write_and_wait_range() will see EIO error, which is critical * to return value of fsync() followed by atomic_write failure to user. */ if (!err || wbc->for_reclaim) return AOP_WRITEPAGE_ACTIVATE; folio_unlock(folio); return err; } /* * This function was copied from write_cache_pages from mm/page-writeback.c. * The major change is making write step of cold data page separately from * warm/hot data page. 
*/ static int f2fs_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, enum iostat_type io_type) { int ret = 0; int done = 0, retry = 0; struct page *pages_local[F2FS_ONSTACK_PAGES]; struct page **pages = pages_local; struct folio_batch fbatch; struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); struct bio *bio = NULL; sector_t last_block; #ifdef CONFIG_F2FS_FS_COMPRESSION struct inode *inode = mapping->host; struct compress_ctx cc = { .inode = inode, .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, .cluster_size = F2FS_I(inode)->i_cluster_size, .cluster_idx = NULL_CLUSTER, .rpages = NULL, .nr_rpages = 0, .cpages = NULL, .valid_nr_cpages = 0, .rbuf = NULL, .cbuf = NULL, .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size, .private = NULL, }; #endif int nr_folios, p, idx; int nr_pages; unsigned int max_pages = F2FS_ONSTACK_PAGES; pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; int range_whole = 0; xa_mark_t tag; int nwritten = 0; int submitted = 0; int i; #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_compressed_file(inode) && 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) { pages = f2fs_kzalloc(sbi, sizeof(struct page *) << cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL); max_pages = 1 << cc.log_cluster_size; } #endif folio_batch_init(&fbatch); if (get_dirty_pages(mapping->host) <= SM_I(F2FS_M_SB(mapping))->min_hot_blocks) set_inode_flag(mapping->host, FI_HOT_DATA); else clear_inode_flag(mapping->host, FI_HOT_DATA); if (wbc->range_cyclic) { index = mapping->writeback_index; /* prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: retry = 0; if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && !retry && (index <= end)) { nr_pages = 0; again: nr_folios = filemap_get_folios_tag(mapping, &index, end, tag, &fbatch); if (nr_folios == 0) { if (nr_pages) goto write; break; } for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; idx = 0; p = folio_nr_pages(folio); add_more: pages[nr_pages] = folio_page(folio, idx); folio_get(folio); if (++nr_pages == max_pages) { index = folio->index + idx + 1; folio_batch_release(&fbatch); goto write; } if (++idx < p) goto add_more; } folio_batch_release(&fbatch); goto again; write: for (i = 0; i < nr_pages; i++) { struct page *page = pages[i]; struct folio *folio = page_folio(page); bool need_readd; readd: need_readd = false; #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_compressed_file(inode)) { void *fsdata = NULL; struct page *pagep; int ret2; ret = f2fs_init_compress_ctx(&cc); if (ret) { done = 1; break; } if (!f2fs_cluster_can_merge_page(&cc, folio->index)) { ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type); if (!ret) need_readd = true; goto result; } if (unlikely(f2fs_cp_error(sbi))) goto lock_folio; if (!f2fs_cluster_is_empty(&cc)) goto lock_folio; if (f2fs_all_cluster_page_ready(&cc, pages, i, nr_pages, true)) goto lock_folio; ret2 = f2fs_prepare_compress_overwrite( inode, &pagep, folio->index, &fsdata); if (ret2 < 0) { ret = ret2; done = 1; break; } else if (ret2 && (!f2fs_compress_write_end(inode, fsdata, folio->index, 1) || !f2fs_all_cluster_page_ready(&cc, pages, i, nr_pages, false))) { retry = 1; break; } } #endif /* give a priority to 
WB_SYNC threads */ if (atomic_read(&sbi->wb_sync_req[DATA]) && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; } #ifdef CONFIG_F2FS_FS_COMPRESSION lock_folio: #endif done_index = folio->index; retry_write: folio_lock(folio); if (unlikely(folio->mapping != mapping)) { continue_unlock: folio_unlock(folio); continue; } if (!folio_test_dirty(folio)) { /* someone wrote it for us */ goto continue_unlock; } if (folio_test_writeback(folio)) { if (wbc->sync_mode == WB_SYNC_NONE) goto continue_unlock; f2fs_wait_on_page_writeback(&folio->page, DATA, true, true); } if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_compressed_file(inode)) { folio_get(folio); f2fs_compress_ctx_add_page(&cc, folio); continue; } #endif submitted = 0; ret = f2fs_write_single_data_page(folio, &submitted, &bio, &last_block, wbc, io_type, 0, true); if (ret == AOP_WRITEPAGE_ACTIVATE) folio_unlock(folio); #ifdef CONFIG_F2FS_FS_COMPRESSION result: #endif nwritten += submitted; wbc->nr_to_write -= submitted; if (unlikely(ret)) { /* * keep nr_to_write, since vfs uses this to * get # of written pages. */ if (ret == AOP_WRITEPAGE_ACTIVATE) { ret = 0; goto next; } else if (ret == -EAGAIN) { ret = 0; if (wbc->sync_mode == WB_SYNC_ALL) { f2fs_io_schedule_timeout( DEFAULT_IO_TIMEOUT); goto retry_write; } goto next; } done_index = folio_next_index(folio); done = 1; break; } if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; } next: if (need_readd) goto readd; } release_pages(pages, nr_pages); cond_resched(); } #ifdef CONFIG_F2FS_FS_COMPRESSION /* flush remained pages in compress cluster */ if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) { ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type); nwritten += submitted; wbc->nr_to_write -= submitted; if (ret) { done = 1; retry = 0; } } if (f2fs_compressed_file(inode)) f2fs_destroy_compress_ctx(&cc, false); #endif if (retry) { index = 0; end = -1; goto retry; } if (wbc->range_cyclic && !done) done_index = 0; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; if (nwritten) f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, NULL, 0, DATA); /* submit cached bio of IPU write */ if (bio) f2fs_submit_merged_ipu_write(sbi, &bio, NULL); #ifdef CONFIG_F2FS_FS_COMPRESSION if (pages != pages_local) kfree(pages); #endif return ret; } static inline bool __should_serialize_io(struct inode *inode, struct writeback_control *wbc) { /* to avoid deadlock in path of data flush */ if (F2FS_I(inode)->wb_task) return false; if (!S_ISREG(inode->i_mode)) return false; if (IS_NOQUOTA(inode)) return false; if (f2fs_need_compress_data(inode)) return true; if (wbc->sync_mode != WB_SYNC_ALL) return true; if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) return true; return false; } static int __f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc, enum iostat_type io_type) { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct blk_plug plug; int ret; bool locked = false; /* skip writing if there is no dirty page in this inode */ if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) return 0; /* during POR, we don't need to trigger writepage at all. 
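 * SBI_POR_DOING is set while recovery after an unclean shutdown is still
 * running; data writeback is skipped until it finishes.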
*/ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto skip_write; if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) && wbc->sync_mode == WB_SYNC_NONE && get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && f2fs_available_free_memory(sbi, DIRTY_DENTS)) goto skip_write; /* skip writing in file defragment preparing stage */ if (is_inode_flag_set(inode, FI_SKIP_WRITES)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, DATA); /* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */ if (wbc->sync_mode == WB_SYNC_ALL) atomic_inc(&sbi->wb_sync_req[DATA]); else if (atomic_read(&sbi->wb_sync_req[DATA])) { /* to avoid potential deadlock */ if (current->plug) blk_finish_plug(current->plug); goto skip_write; } if (__should_serialize_io(inode, wbc)) { mutex_lock(&sbi->writepages); locked = true; } blk_start_plug(&plug); ret = f2fs_write_cache_pages(mapping, wbc, io_type); blk_finish_plug(&plug); if (locked) mutex_unlock(&sbi->writepages); if (wbc->sync_mode == WB_SYNC_ALL) atomic_dec(&sbi->wb_sync_req[DATA]); /* * if some pages were truncated, we cannot guarantee its mapping->host * to detect pending bios. */ f2fs_remove_dirty_inode(inode); return ret; skip_write: wbc->pages_skipped += get_dirty_pages(inode); trace_f2fs_writepages(mapping->host, wbc, DATA); return 0; } static int f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; return __f2fs_write_data_pages(mapping, wbc, F2FS_I(inode)->cp_task == current ? FS_CP_DATA_IO : FS_DATA_IO); } void f2fs_write_failed(struct inode *inode, loff_t to) { loff_t i_size = i_size_read(inode); if (IS_NOQUOTA(inode)) return; /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */ if (to > i_size && !f2fs_verity_in_progress(inode)) { f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); truncate_pagecache(inode, i_size); f2fs_truncate_blocks(inode, i_size, true); filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } } static int prepare_write_begin(struct f2fs_sb_info *sbi, struct folio *folio, loff_t pos, unsigned int len, block_t *blk_addr, bool *node_changed) { struct inode *inode = folio->mapping->host; pgoff_t index = folio->index; struct dnode_of_data dn; struct page *ipage; bool locked = false; int flag = F2FS_GET_BLOCK_PRE_AIO; int err = 0; /* * If a whole page is being written and we already preallocated all the * blocks, then there is no need to get a block address now. 
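 * FI_PREALLOCATED_ALL is set when the write path managed to preallocate
 * every block for this write; the block address is then resolved at
 * writeback time.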
*/ if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL)) return 0; /* f2fs_lock_op avoids race between write CP and convert_inline_page */ if (f2fs_has_inline_data(inode)) { if (pos + len > MAX_INLINE_DATA(inode)) flag = F2FS_GET_BLOCK_DEFAULT; f2fs_map_lock(sbi, flag); locked = true; } else if ((pos & PAGE_MASK) >= i_size_read(inode)) { f2fs_map_lock(sbi, flag); locked = true; } restart: /* check inline_data */ ipage = f2fs_get_inode_page(sbi, inode->i_ino); if (IS_ERR(ipage)) { err = PTR_ERR(ipage); goto unlock_out; } set_new_dnode(&dn, inode, ipage, ipage, 0); if (f2fs_has_inline_data(inode)) { if (pos + len <= MAX_INLINE_DATA(inode)) { f2fs_do_read_inline_data(folio, ipage); set_inode_flag(inode, FI_DATA_EXIST); if (inode->i_nlink) set_page_private_inline(ipage); goto out; } err = f2fs_convert_inline_page(&dn, folio_page(folio, 0)); if (err || dn.data_blkaddr != NULL_ADDR) goto out; } if (!f2fs_lookup_read_extent_cache_block(inode, index, &dn.data_blkaddr)) { if (IS_DEVICE_ALIASING(inode)) { err = -ENODATA; goto out; } if (locked) { err = f2fs_reserve_block(&dn, index); goto out; } /* hole case */ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (!err && dn.data_blkaddr != NULL_ADDR) goto out; f2fs_put_dnode(&dn); f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO); WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO); locked = true; goto restart; } out: if (!err) { /* convert_inline_page can make node_changed */ *blk_addr = dn.data_blkaddr; *node_changed = dn.node_changed; } f2fs_put_dnode(&dn); unlock_out: if (locked) f2fs_map_unlock(sbi, flag); return err; } static int __find_data_block(struct inode *inode, pgoff_t index, block_t *blk_addr) { struct dnode_of_data dn; struct page *ipage; int err = 0; ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino); if (IS_ERR(ipage)) return PTR_ERR(ipage); set_new_dnode(&dn, inode, ipage, ipage, 0); if (!f2fs_lookup_read_extent_cache_block(inode, index, &dn.data_blkaddr)) { /* hole case */ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err) { dn.data_blkaddr = NULL_ADDR; err = 0; } } *blk_addr = dn.data_blkaddr; f2fs_put_dnode(&dn); return err; } static int __reserve_data_block(struct inode *inode, pgoff_t index, block_t *blk_addr, bool *node_changed) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; struct page *ipage; int err = 0; f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO); ipage = f2fs_get_inode_page(sbi, inode->i_ino); if (IS_ERR(ipage)) { err = PTR_ERR(ipage); goto unlock_out; } set_new_dnode(&dn, inode, ipage, ipage, 0); if (!f2fs_lookup_read_extent_cache_block(dn.inode, index, &dn.data_blkaddr)) err = f2fs_reserve_block(&dn, index); *blk_addr = dn.data_blkaddr; *node_changed = dn.node_changed; f2fs_put_dnode(&dn); unlock_out: f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO); return err; } static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi, struct folio *folio, loff_t pos, unsigned int len, block_t *blk_addr, bool *node_changed, bool *use_cow) { struct inode *inode = folio->mapping->host; struct inode *cow_inode = F2FS_I(inode)->cow_inode; pgoff_t index = folio->index; int err = 0; block_t ori_blk_addr = NULL_ADDR; /* If pos is beyond the end of file, reserve a new block in COW inode */ if ((pos & PAGE_MASK) >= i_size_read(inode)) goto reserve_block; /* Look for the block in COW inode first */ err = __find_data_block(cow_inode, index, blk_addr); if (err) { return err; } else if (*blk_addr != NULL_ADDR) { *use_cow = true; return 0; } if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE)) goto 
reserve_block; /* Look for the block in the original inode */ err = __find_data_block(inode, index, &ori_blk_addr); if (err) return err; reserve_block: /* Finally, we should reserve a new block in COW inode for the update */ err = __reserve_data_block(cow_inode, index, blk_addr, node_changed); if (err) return err; inc_atomic_write_cnt(inode); if (ori_blk_addr != NULL_ADDR) *blk_addr = ori_blk_addr; return 0; } static int f2fs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct folio *folio; pgoff_t index = pos >> PAGE_SHIFT; bool need_balance = false; bool use_cow = false; block_t blkaddr = NULL_ADDR; int err = 0; trace_f2fs_write_begin(inode, pos, len); if (!f2fs_is_checkpoint_ready(sbi)) { err = -ENOSPC; goto fail; } /* * We should check this at this moment to avoid deadlock on inode page * and #0 page. The locking rule for inline_data conversion should be: * folio_lock(folio #0) -> folio_lock(inode_page) */ if (index != 0) { err = f2fs_convert_inline_inode(inode); if (err) goto fail; } #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_compressed_file(inode)) { int ret; struct page *page; *fsdata = NULL; if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode))) goto repeat; ret = f2fs_prepare_compress_overwrite(inode, &page, index, fsdata); if (ret < 0) { err = ret; goto fail; } else if (ret) { *foliop = page_folio(page); return 0; } } #endif repeat: /* * Do not use FGP_STABLE to avoid deadlock. * Will wait that below with our IO control. */ folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); if (IS_ERR(folio)) { err = PTR_ERR(folio); goto fail; } /* TODO: cluster can be compressed due to race with .writepage */ *foliop = folio; if (f2fs_is_atomic_file(inode)) err = prepare_atomic_write_begin(sbi, folio, pos, len, &blkaddr, &need_balance, &use_cow); else err = prepare_write_begin(sbi, folio, pos, len, &blkaddr, &need_balance); if (err) goto put_folio; if (need_balance && !IS_NOQUOTA(inode) && has_not_enough_free_secs(sbi, 0, 0)) { folio_unlock(folio); f2fs_balance_fs(sbi, true); folio_lock(folio); if (folio->mapping != mapping) { /* The folio got truncated from under us */ folio_unlock(folio); folio_put(folio); goto repeat; } } f2fs_wait_on_page_writeback(&folio->page, DATA, false, true); if (len == folio_size(folio) || folio_test_uptodate(folio)) return 0; if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && !f2fs_verity_in_progress(inode)) { folio_zero_segment(folio, len, folio_size(folio)); return 0; } if (blkaddr == NEW_ADDR) { folio_zero_segment(folio, 0, folio_size(folio)); folio_mark_uptodate(folio); } else { if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ)) { err = -EFSCORRUPTED; goto put_folio; } err = f2fs_submit_page_read(use_cow ? 
F2FS_I(inode)->cow_inode : inode, folio, blkaddr, 0, true); if (err) goto put_folio; folio_lock(folio); if (unlikely(folio->mapping != mapping)) { folio_unlock(folio); folio_put(folio); goto repeat; } if (unlikely(!folio_test_uptodate(folio))) { err = -EIO; goto put_folio; } } return 0; put_folio: folio_unlock(folio); folio_put(folio); fail: f2fs_write_failed(inode, pos + len); return err; } static int f2fs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { struct inode *inode = folio->mapping->host; trace_f2fs_write_end(inode, pos, len, copied); /* * This should be come from len == PAGE_SIZE, and we expect copied * should be PAGE_SIZE. Otherwise, we treat it with zero copied and * let generic_perform_write() try to copy data again through copied=0. */ if (!folio_test_uptodate(folio)) { if (unlikely(copied != len)) copied = 0; else folio_mark_uptodate(folio); } #ifdef CONFIG_F2FS_FS_COMPRESSION /* overwrite compressed file */ if (f2fs_compressed_file(inode) && fsdata) { f2fs_compress_write_end(inode, fsdata, folio->index, copied); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); if (pos + copied > i_size_read(inode) && !f2fs_verity_in_progress(inode)) f2fs_i_size_write(inode, pos + copied); return copied; } #endif if (!copied) goto unlock_out; folio_mark_dirty(folio); if (f2fs_is_atomic_file(inode)) set_page_private_atomic(folio_page(folio, 0)); if (pos + copied > i_size_read(inode) && !f2fs_verity_in_progress(inode)) { f2fs_i_size_write(inode, pos + copied); if (f2fs_is_atomic_file(inode)) f2fs_i_size_write(F2FS_I(inode)->cow_inode, pos + copied); } unlock_out: folio_unlock(folio); folio_put(folio); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return copied; } void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct inode *inode = folio->mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (inode->i_ino >= F2FS_ROOT_INO(sbi) && (offset || length != folio_size(folio))) return; if (folio_test_dirty(folio)) { if (inode->i_ino == F2FS_META_INO(sbi)) { dec_page_count(sbi, F2FS_DIRTY_META); } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { dec_page_count(sbi, F2FS_DIRTY_NODES); } else { inode_dec_dirty_pages(inode); f2fs_remove_dirty_inode(inode); } } clear_page_private_all(&folio->page); } bool f2fs_release_folio(struct folio *folio, gfp_t wait) { /* If this is dirty folio, keep private data */ if (folio_test_dirty(folio)) return false; clear_page_private_all(&folio->page); return true; } static bool f2fs_dirty_data_folio(struct address_space *mapping, struct folio *folio) { struct inode *inode = mapping->host; trace_f2fs_set_page_dirty(folio, DATA); if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); BUG_ON(folio_test_swapcache(folio)); if (filemap_dirty_folio(mapping, folio)) { f2fs_update_dirty_folio(inode, folio); return true; } return false; } static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) { #ifdef CONFIG_F2FS_FS_COMPRESSION struct dnode_of_data dn; sector_t start_idx, blknr = 0; int ret; start_idx = round_down(block, F2FS_I(inode)->i_cluster_size); set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); if (ret) return 0; if (dn.data_blkaddr != COMPRESS_ADDR) { dn.ofs_in_node += block - start_idx; blknr = f2fs_data_blkaddr(&dn); if (!__is_valid_data_blkaddr(blknr)) blknr = 0; } f2fs_put_dnode(&dn); return blknr; #else return 0; #endif } static sector_t f2fs_bmap(struct address_space 
*mapping, sector_t block) { struct inode *inode = mapping->host; sector_t blknr = 0; if (f2fs_has_inline_data(inode)) goto out; /* make sure allocating whole blocks */ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) filemap_write_and_wait(mapping); /* Block number less than F2FS MAX BLOCKS */ if (unlikely(block >= max_file_blocks(inode))) goto out; if (f2fs_compressed_file(inode)) { blknr = f2fs_bmap_compress(inode, block); } else { struct f2fs_map_blocks map; memset(&map, 0, sizeof(map)); map.m_lblk = block; map.m_len = 1; map.m_next_pgofs = NULL; map.m_seg_type = NO_CHECK_TYPE; if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP)) blknr = map.m_pblk; } out: trace_f2fs_bmap(inode, block, blknr); return blknr; } #ifdef CONFIG_SWAP static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, unsigned int blkcnt) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int blkofs; unsigned int blk_per_sec = BLKS_PER_SEC(sbi); unsigned int end_blk = start_blk + blkcnt - 1; unsigned int secidx = start_blk / blk_per_sec; unsigned int end_sec; int ret = 0; if (!blkcnt) return 0; end_sec = end_blk / blk_per_sec; f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); set_inode_flag(inode, FI_ALIGNED_WRITE); set_inode_flag(inode, FI_OPU_WRITE); for (; secidx <= end_sec; secidx++) { unsigned int blkofs_end = secidx == end_sec ? end_blk % blk_per_sec : blk_per_sec - 1; f2fs_down_write(&sbi->pin_sem); ret = f2fs_allocate_pinning_section(sbi); if (ret) { f2fs_up_write(&sbi->pin_sem); break; } set_inode_flag(inode, FI_SKIP_WRITES); for (blkofs = 0; blkofs <= blkofs_end; blkofs++) { struct page *page; unsigned int blkidx = secidx * blk_per_sec + blkofs; page = f2fs_get_lock_data_page(inode, blkidx, true); if (IS_ERR(page)) { f2fs_up_write(&sbi->pin_sem); ret = PTR_ERR(page); goto done; } set_page_dirty(page); f2fs_put_page(page, 1); } clear_inode_flag(inode, FI_SKIP_WRITES); ret = filemap_fdatawrite(inode->i_mapping); f2fs_up_write(&sbi->pin_sem); if (ret) break; } done: clear_inode_flag(inode, FI_SKIP_WRITES); clear_inode_flag(inode, FI_OPU_WRITE); clear_inode_flag(inode, FI_ALIGNED_WRITE); filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); return ret; } static int check_swap_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); block_t cur_lblock; block_t last_lblock; block_t pblock; block_t lowest_pblock = -1; block_t highest_pblock = 0; int nr_extents = 0; unsigned int nr_pblocks; unsigned int blks_per_sec = BLKS_PER_SEC(sbi); unsigned int not_aligned = 0; int ret = 0; /* * Map all the blocks into the extent list. This code doesn't try * to be very smart. 
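 *
 * Each extent returned by f2fs_map_blocks() must be fully mapped (a hole
 * makes the swapfile unusable) and aligned to the section size; extents
 * that are misaligned, or that fall outside the area valid for pinned
 * files, are rewritten in place via f2fs_migrate_blocks() and the lookup
 * is retried.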
*/ cur_lblock = 0; last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode)); while (cur_lblock < last_lblock && cur_lblock < sis->max) { struct f2fs_map_blocks map; retry: cond_resched(); memset(&map, 0, sizeof(map)); map.m_lblk = cur_lblock; map.m_len = last_lblock - cur_lblock; map.m_next_pgofs = NULL; map.m_next_extent = NULL; map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP); if (ret) goto out; /* hole */ if (!(map.m_flags & F2FS_MAP_FLAGS)) { f2fs_err(sbi, "Swapfile has holes"); ret = -EINVAL; goto out; } pblock = map.m_pblk; nr_pblocks = map.m_len; if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec || nr_pblocks % blks_per_sec || !f2fs_valid_pinned_area(sbi, pblock)) { bool last_extent = false; not_aligned++; nr_pblocks = roundup(nr_pblocks, blks_per_sec); if (cur_lblock + nr_pblocks > sis->max) nr_pblocks -= blks_per_sec; /* this extent is last one */ if (!nr_pblocks) { nr_pblocks = last_lblock - cur_lblock; last_extent = true; } ret = f2fs_migrate_blocks(inode, cur_lblock, nr_pblocks); if (ret) { if (ret == -ENOENT) ret = -EINVAL; goto out; } if (!last_extent) goto retry; } if (cur_lblock + nr_pblocks >= sis->max) nr_pblocks = sis->max - cur_lblock; if (cur_lblock) { /* exclude the header page */ if (pblock < lowest_pblock) lowest_pblock = pblock; if (pblock + nr_pblocks - 1 > highest_pblock) highest_pblock = pblock + nr_pblocks - 1; } /* * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks */ ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock); if (ret < 0) goto out; nr_extents += ret; cur_lblock += nr_pblocks; } ret = nr_extents; *span = 1 + highest_pblock - lowest_pblock; if (cur_lblock == 0) cur_lblock = 1; /* force Empty message */ sis->max = cur_lblock; sis->pages = cur_lblock - 1; out: if (not_aligned) f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)", not_aligned, blks_per_sec * F2FS_BLKSIZE); return ret; } static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, sector_t *span) { struct inode *inode = file_inode(file); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int ret; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (f2fs_readonly(sbi->sb)) return -EROFS; if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) { f2fs_err(sbi, "Swapfile not supported in LFS mode"); return -EINVAL; } ret = f2fs_convert_inline_inode(inode); if (ret) return ret; if (!f2fs_disable_compressed_file(inode)) return -EINVAL; ret = filemap_fdatawrite(inode->i_mapping); if (ret < 0) return ret; f2fs_precache_extents(inode); ret = check_swap_activate(sis, file, span); if (ret < 0) return ret; stat_inc_swapfile_inode(inode); set_inode_flag(inode, FI_PIN_FILE); f2fs_update_time(sbi, REQ_TIME); return ret; } static void f2fs_swap_deactivate(struct file *file) { struct inode *inode = file_inode(file); stat_dec_swapfile_inode(inode); clear_inode_flag(inode, FI_PIN_FILE); } #else static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, sector_t *span) { return -EOPNOTSUPP; } static void f2fs_swap_deactivate(struct file *file) { } #endif const struct address_space_operations f2fs_dblock_aops = { .read_folio = f2fs_read_data_folio, .readahead = f2fs_readahead, .writepages = f2fs_write_data_pages, .write_begin = f2fs_write_begin, .write_end = f2fs_write_end, .dirty_folio = f2fs_dirty_data_folio, .migrate_folio = filemap_migrate_folio, .invalidate_folio = f2fs_invalidate_folio, .release_folio = f2fs_release_folio, 
.bmap = f2fs_bmap, .swap_activate = f2fs_swap_activate, .swap_deactivate = f2fs_swap_deactivate, }; void f2fs_clear_page_cache_dirty_tag(struct folio *folio) { struct address_space *mapping = folio->mapping; unsigned long flags; xa_lock_irqsave(&mapping->i_pages, flags); __xa_clear_mark(&mapping->i_pages, folio->index, PAGECACHE_TAG_DIRTY); xa_unlock_irqrestore(&mapping->i_pages, flags); } int __init f2fs_init_post_read_processing(void) { bio_post_read_ctx_cache = kmem_cache_create("f2fs_bio_post_read_ctx", sizeof(struct bio_post_read_ctx), 0, 0, NULL); if (!bio_post_read_ctx_cache) goto fail; bio_post_read_ctx_pool = mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS, bio_post_read_ctx_cache); if (!bio_post_read_ctx_pool) goto fail_free_cache; return 0; fail_free_cache: kmem_cache_destroy(bio_post_read_ctx_cache); fail: return -ENOMEM; } void f2fs_destroy_post_read_processing(void) { mempool_destroy(bio_post_read_ctx_pool); kmem_cache_destroy(bio_post_read_ctx_cache); } int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi) { if (!f2fs_sb_has_encrypt(sbi) && !f2fs_sb_has_verity(sbi) && !f2fs_sb_has_compression(sbi)) return 0; sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq", WQ_UNBOUND | WQ_HIGHPRI, num_online_cpus()); return sbi->post_read_wq ? 0 : -ENOMEM; } void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi) { if (sbi->post_read_wq) destroy_workqueue(sbi->post_read_wq); } int __init f2fs_init_bio_entry_cache(void) { bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab", sizeof(struct bio_entry)); return bio_entry_slab ? 0 : -ENOMEM; } void f2fs_destroy_bio_entry_cache(void) { kmem_cache_destroy(bio_entry_slab); } static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { struct f2fs_map_blocks map = {}; pgoff_t next_pgofs = 0; int err; map.m_lblk = F2FS_BYTES_TO_BLK(offset); map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1; map.m_next_pgofs = &next_pgofs; map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode), inode->i_write_hint); /* * If the blocks being overwritten are already allocated, * f2fs_map_lock and f2fs_balance_fs are not necessary. */ if ((flags & IOMAP_WRITE) && !f2fs_overwrite_io(inode, offset, length)) map.m_may_create = true; err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO); if (err) return err; iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk); /* * When inline encryption is enabled, sometimes I/O to an encrypted file * has to be broken up to guarantee DUN contiguity. Handle this by * limiting the length of the mapping returned. */ map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len); /* * We should never see delalloc or compressed extents here based on * prior flushing and checks. 
*/ if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR)) return -EINVAL; if (map.m_flags & F2FS_MAP_MAPPED) { if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR)) return -EINVAL; iomap->length = F2FS_BLK_TO_BYTES(map.m_len); iomap->type = IOMAP_MAPPED; iomap->flags |= IOMAP_F_MERGED; iomap->bdev = map.m_bdev; iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk); } else { if (flags & IOMAP_WRITE) return -ENOTBLK; if (map.m_pblk == NULL_ADDR) { iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) - iomap->offset; iomap->type = IOMAP_HOLE; } else if (map.m_pblk == NEW_ADDR) { iomap->length = F2FS_BLK_TO_BYTES(map.m_len); iomap->type = IOMAP_UNWRITTEN; } else { f2fs_bug_on(F2FS_I_SB(inode), 1); } iomap->addr = IOMAP_NULL_ADDR; } if (map.m_flags & F2FS_MAP_NEW) iomap->flags |= IOMAP_F_NEW; if ((inode->i_state & I_DIRTY_DATASYNC) || offset + length > i_size_read(inode)) iomap->flags |= IOMAP_F_DIRTY; return 0; } const struct iomap_ops f2fs_iomap_ops = { .iomap_begin = f2fs_iomap_begin, }; |
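/*
 * Context for the ops table above: f2fs hands f2fs_iomap_ops to the generic
 * iomap layer for direct I/O.  A rough, illustrative sketch of such a call
 * site (the iocb/to locals are assumed to exist in the caller, and the exact
 * iomap_dio_rw() signature differs between kernel versions):
 *
 *	ssize_t ret;
 *
 *	ret = iomap_dio_rw(iocb, to, &f2fs_iomap_ops, NULL, 0, NULL, 0);
 *	if (ret == -ENOTBLK)
 *		ret = 0;	// fall back to buffered I/O
 */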
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_sb.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_quota.h" #include "xfs_qm.h" #include "xfs_icache.h" int xfs_qm_scall_quotaoff( xfs_mount_t *mp, uint flags) { /* * No file system can have quotas enabled on disk but not in core. * Note that quota utilities (like quotaoff) _expect_ * errno == -EEXIST here. */ if ((mp->m_qflags & flags) == 0) return -EEXIST; /* * We do not support actually turning off quota accounting any more. * Just log a warning and ignore the accounting related flags. */ if (flags & XFS_ALL_QUOTA_ACCT) xfs_info(mp, "disabling of quota accounting not supported."); mutex_lock(&mp->m_quotainfo->qi_quotaofflock); mp->m_qflags &= ~(flags & XFS_ALL_QUOTA_ENFD); spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = mp->m_qflags; spin_unlock(&mp->m_sb_lock); mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); /* XXX what to do if error ? Revert back to old vals incore ?
*/ return xfs_sync_sb(mp, false); } STATIC int xfs_qm_scall_trunc_qfile( struct xfs_mount *mp, xfs_dqtype_t type) { struct xfs_inode *ip; struct xfs_trans *tp; int error; error = xfs_qm_qino_load(mp, type, &ip); if (error == -ENOENT) return 0; if (error) return error; xfs_ilock(ip, XFS_IOLOCK_EXCL); error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); if (error) { xfs_iunlock(ip, XFS_IOLOCK_EXCL); goto out_put; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); ip->i_disk_size = 0; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); if (error) { xfs_trans_cancel(tp); goto out_unlock; } ASSERT(ip->i_df.if_nextents == 0); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); error = xfs_trans_commit(tp); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); out_put: xfs_irele(ip); return error; } int xfs_qm_scall_trunc_qfiles( xfs_mount_t *mp, uint flags) { int error = -EINVAL; if (!xfs_has_quota(mp) || flags == 0 || (flags & ~XFS_QMOPT_QUOTALL)) { xfs_debug(mp, "%s: flags=%x m_qflags=%x", __func__, flags, mp->m_qflags); return -EINVAL; } if (flags & XFS_QMOPT_UQUOTA) { error = xfs_qm_scall_trunc_qfile(mp, XFS_DQTYPE_USER); if (error) return error; } if (flags & XFS_QMOPT_GQUOTA) { error = xfs_qm_scall_trunc_qfile(mp, XFS_DQTYPE_GROUP); if (error) return error; } if (flags & XFS_QMOPT_PQUOTA) error = xfs_qm_scall_trunc_qfile(mp, XFS_DQTYPE_PROJ); return error; } /* * Switch on (a given) quota enforcement for a filesystem. This takes * effect immediately. * (Switching on quota accounting must be done at mount time.) */ int xfs_qm_scall_quotaon( xfs_mount_t *mp, uint flags) { int error; uint qf; /* * Switching on quota accounting must be done at mount time, * only consider quota enforcement stuff here. */ flags &= XFS_ALL_QUOTA_ENFD; if (flags == 0) { xfs_debug(mp, "%s: zero flags, m_qflags=%x", __func__, mp->m_qflags); return -EINVAL; } /* * Can't enforce without accounting. We check the superblock * qflags here instead of m_qflags because rootfs can have * quota acct on ondisk without m_qflags' knowing. */ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && (flags & XFS_UQUOTA_ENFD)) || ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && (flags & XFS_GQUOTA_ENFD)) || ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && (flags & XFS_PQUOTA_ENFD))) { xfs_debug(mp, "%s: Can't enforce without acct, flags=%x sbflags=%x", __func__, flags, mp->m_sb.sb_qflags); return -EINVAL; } /* * If everything's up to-date incore, then don't waste time. */ if ((mp->m_qflags & flags) == flags) return -EEXIST; /* * Change sb_qflags on disk but not incore mp->qflags * if this is the root filesystem. */ spin_lock(&mp->m_sb_lock); qf = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = qf | flags; spin_unlock(&mp->m_sb_lock); /* * There's nothing to change if it's the same. */ if ((qf & flags) == flags) return -EEXIST; error = xfs_sync_sb(mp, false); if (error) return error; /* * If we aren't trying to switch on quota enforcement, we are done. */ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != (mp->m_qflags & XFS_UQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != (mp->m_qflags & XFS_PQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != (mp->m_qflags & XFS_GQUOTA_ACCT))) return 0; if (!XFS_IS_QUOTA_ON(mp)) return -ESRCH; /* * Switch on quota enforcement in core. 
*/ mutex_lock(&mp->m_quotainfo->qi_quotaofflock); mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); return 0; } #define XFS_QC_MASK (QC_LIMIT_MASK | QC_TIMER_MASK) /* * Adjust limits of this quota, and the defaults if passed in. Returns true * if the new limits made sense and were applied, false otherwise. */ static inline bool xfs_setqlim_limits( struct xfs_mount *mp, struct xfs_dquot_res *res, struct xfs_quota_limits *qlim, xfs_qcnt_t hard, xfs_qcnt_t soft, const char *tag) { /* The hard limit can't be less than the soft limit. */ if (hard != 0 && hard < soft) { xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag, soft); return false; } res->hardlimit = hard; res->softlimit = soft; if (qlim) { qlim->hard = hard; qlim->soft = soft; } return true; } static inline void xfs_setqlim_timer( struct xfs_mount *mp, struct xfs_dquot_res *res, struct xfs_quota_limits *qlim, s64 timer) { if (qlim) { /* Set the length of the default grace period. */ res->timer = xfs_dquot_set_grace_period(timer); qlim->time = res->timer; } else { /* Set the grace period expiration on a quota. */ res->timer = xfs_dquot_set_timeout(mp, timer); } } /* * Adjust quota limits, and start/stop timers accordingly. */ int xfs_qm_scall_setqlim( struct xfs_mount *mp, xfs_dqid_t id, xfs_dqtype_t type, struct qc_dqblk *newlim) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_dquot *dqp; struct xfs_trans *tp; struct xfs_def_quota *defq; struct xfs_dquot_res *res; struct xfs_quota_limits *qlim; int error; xfs_qcnt_t hard, soft; if (newlim->d_fieldmask & ~XFS_QC_MASK) return -EINVAL; if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) return 0; /* * Get the dquot (locked) before we start, as we need to do a * transaction to allocate it if it doesn't exist. Once we have the * dquot, unlock it so we can start the next transaction safely. We hold * a reference to the dquot, so it's safe to do this unlock/lock without * it being reclaimed in the mean time. */ error = xfs_qm_dqget(mp, id, type, true, &dqp); if (error) { ASSERT(error != -ENOENT); return error; } defq = xfs_get_defquota(q, xfs_dquot_type(dqp)); xfs_dqunlock(dqp); error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp); if (error) goto out_rele; xfs_dqlock(dqp); xfs_trans_dqjoin(tp, dqp); /* * Update quota limits, warnings, and timers, and the defaults * if we're touching id == 0. * * Make sure that hardlimits are >= soft limits before changing. * * Update warnings counter(s) if requested. * * Timelimits for the super user set the relative time the other users * can be over quota for this file system. If it is zero a default is * used. Ditto for the default soft and hard limit values (already * done, above), and for warnings. * * For other IDs, userspace can bump out the grace period if over * the soft limit. */ /* Blocks on the data device. */ hard = (newlim->d_fieldmask & QC_SPC_HARD) ? (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : dqp->q_blk.hardlimit; soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : dqp->q_blk.softlimit; res = &dqp->q_blk; qlim = id == 0 ? &defq->blk : NULL; if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk")) xfs_dquot_set_prealloc_limits(dqp); if (newlim->d_fieldmask & QC_SPC_TIMER) xfs_setqlim_timer(mp, res, qlim, newlim->d_spc_timer); /* Blocks on the realtime device. */ hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? 
(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : dqp->q_rtb.hardlimit; soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : dqp->q_rtb.softlimit; res = &dqp->q_rtb; qlim = id == 0 ? &defq->rtb : NULL; xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb"); if (newlim->d_fieldmask & QC_RT_SPC_TIMER) xfs_setqlim_timer(mp, res, qlim, newlim->d_rt_spc_timer); /* Inodes */ hard = (newlim->d_fieldmask & QC_INO_HARD) ? (xfs_qcnt_t) newlim->d_ino_hardlimit : dqp->q_ino.hardlimit; soft = (newlim->d_fieldmask & QC_INO_SOFT) ? (xfs_qcnt_t) newlim->d_ino_softlimit : dqp->q_ino.softlimit; res = &dqp->q_ino; qlim = id == 0 ? &defq->ino : NULL; xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino"); if (newlim->d_fieldmask & QC_INO_TIMER) xfs_setqlim_timer(mp, res, qlim, newlim->d_ino_timer); if (id != 0) { /* * If the user is now over quota, start the timelimit. * The user will not be 'warned'. * Note that we keep the timers ticking, whether enforcement * is on or off. We don't really want to bother with iterating * over all ondisk dquots and turning the timers on/off. */ xfs_qm_adjust_dqtimers(dqp); } dqp->q_flags |= XFS_DQFLAG_DIRTY; xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp); out_rele: xfs_qm_dqrele(dqp); return error; } /* Fill out the quota context. */ static void xfs_qm_scall_getquota_fill_qc( struct xfs_mount *mp, xfs_dqtype_t type, const struct xfs_dquot *dqp, struct qc_dqblk *dst) { memset(dst, 0, sizeof(*dst)); dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit); dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit); dst->d_ino_hardlimit = dqp->q_ino.hardlimit; dst->d_ino_softlimit = dqp->q_ino.softlimit; dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved); dst->d_ino_count = dqp->q_ino.reserved; dst->d_spc_timer = dqp->q_blk.timer; dst->d_ino_timer = dqp->q_ino.timer; dst->d_ino_warns = 0; dst->d_spc_warns = 0; dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit); dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit); dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved); dst->d_rt_spc_timer = dqp->q_rtb.timer; dst->d_rt_spc_warns = 0; /* * Internally, we don't reset all the timers when quota enforcement * gets turned off. No need to confuse the user level code, * so return zeroes in that case. */ if (!xfs_dquot_is_enforced(dqp)) { dst->d_spc_timer = 0; dst->d_ino_timer = 0; dst->d_rt_spc_timer = 0; } } /* Return the quota information for the dquot matching id. */ int xfs_qm_scall_getquota( struct xfs_mount *mp, xfs_dqid_t id, xfs_dqtype_t type, struct qc_dqblk *dst) { struct xfs_dquot *dqp; int error; /* * Expedite pending inodegc work at the start of a quota reporting * scan but don't block waiting for it to complete. */ if (id == 0) xfs_inodegc_push(mp); /* * Try to get the dquot. We don't want it allocated on disk, so don't * set doalloc. If it doesn't exist, we'll get ENOENT back. */ error = xfs_qm_dqget(mp, id, type, false, &dqp); if (error) return error; /* * If everything's NULL, this dquot doesn't quite exist as far as * our utility programs are concerned. */ if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { error = -ENOENT; goto out_put; } xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst); out_put: xfs_qm_dqput(dqp); return error; } /* * Return the quota information for the first initialized dquot whose id * is at least as high as id. 
*/ int xfs_qm_scall_getquota_next( struct xfs_mount *mp, xfs_dqid_t *id, xfs_dqtype_t type, struct qc_dqblk *dst) { struct xfs_dquot *dqp; int error; /* Flush inodegc work at the start of a quota reporting scan. */ if (*id == 0) xfs_inodegc_push(mp); error = xfs_qm_dqget_next(mp, *id, type, &dqp); if (error) return error; /* Fill in the ID we actually read from disk */ *id = dqp->q_id; xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst); xfs_qm_dqput(dqp); return error; } |
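/*
 * Context for the helpers above: the xfs_qm_scall_*() functions are not
 * invoked directly by userspace.  They are reached (roughly, via
 * fs/xfs/xfs_quotaops.c) from the generic quotactl paths, e.g.:
 *
 *	Q_XSETQLIM      -> xfs_fs_set_dqblk()     -> xfs_qm_scall_setqlim()
 *	Q_XGETQUOTA     -> xfs_fs_get_dqblk()     -> xfs_qm_scall_getquota()
 *	Q_XGETNEXTQUOTA -> xfs_fs_get_nextdqblk() -> xfs_qm_scall_getquota_next()
 *
 * so a userspace scan over all dquots typically issues Q_XGETNEXTQUOTA in a
 * loop, feeding back the last id it saw plus one.
 */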
/* * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com * * Copyright 2008 Ben Gamari <bgamari@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish,
distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/debugfs.h> #include <linux/export.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <drm/drm_atomic.h> #include <drm/drm_auth.h> #include <drm/drm_bridge.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_managed.h> #include <drm/drm_gpuvm.h> #include "drm_crtc_internal.h" #include "drm_internal.h" /*************************************************** * Initialization, etc. **************************************************/ static int drm_name_info(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct drm_master *master; mutex_lock(&dev->master_mutex); master = dev->master; seq_printf(m, "%s", dev->driver->name); if (dev->dev) seq_printf(m, " dev=%s", dev_name(dev->dev)); if (master && master->unique) seq_printf(m, " master=%s", master->unique); if (dev->unique) seq_printf(m, " unique=%s", dev->unique); seq_printf(m, "\n"); mutex_unlock(&dev->master_mutex); return 0; } static int drm_clients_info(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct drm_file *priv; kuid_t uid; seq_printf(m, "%20s %5s %3s master a %5s %10s %*s\n", "command", "tgid", "dev", "uid", "magic", DRM_CLIENT_NAME_MAX_LEN, "name"); /* dev->filelist is sorted youngest first, but we want to present * oldest first (i.e. kernel, servers, clients), so walk backwardss. */ mutex_lock(&dev->filelist_mutex); list_for_each_entry_reverse(priv, &dev->filelist, lhead) { bool is_current_master = drm_is_current_master(priv); struct task_struct *task; struct pid *pid; mutex_lock(&priv->client_name_lock); rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */ pid = rcu_dereference(priv->pid); task = pid_task(pid, PIDTYPE_TGID); uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID; seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n", task ? task->comm : "<unknown>", pid_vnr(pid), priv->minor->index, is_current_master ? 'y' : 'n', priv->authenticated ? 'y' : 'n', from_kuid_munged(seq_user_ns(m), uid), priv->magic, DRM_CLIENT_NAME_MAX_LEN, priv->client_name ? 
priv->client_name : "<unset>"); rcu_read_unlock(); mutex_unlock(&priv->client_name_lock); } mutex_unlock(&dev->filelist_mutex); return 0; } static int drm_gem_one_name_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; struct seq_file *m = data; seq_printf(m, "%6d %8zd %7d %8d\n", obj->name, obj->size, obj->handle_count, kref_read(&obj->refcount)); return 0; } static int drm_gem_name_info(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; seq_printf(m, " name size handles refcount\n"); mutex_lock(&dev->object_name_lock); idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); mutex_unlock(&dev->object_name_lock); return 0; } static const struct drm_debugfs_info drm_debugfs_list[] = { {"name", drm_name_info, 0}, {"clients", drm_clients_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, }; #define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) static int drm_debugfs_open(struct inode *inode, struct file *file) { struct drm_info_node *node = inode->i_private; if (!device_is_registered(node->minor->kdev)) return -ENODEV; return single_open(file, node->info_ent->show, node); } static int drm_debugfs_entry_open(struct inode *inode, struct file *file) { struct drm_debugfs_entry *entry = inode->i_private; struct drm_debugfs_info *node = &entry->file; struct drm_minor *minor = entry->dev->primary ?: entry->dev->accel; if (!device_is_registered(minor->kdev)) return -ENODEV; return single_open(file, node->show, entry); } static const struct file_operations drm_debugfs_entry_fops = { .owner = THIS_MODULE, .open = drm_debugfs_entry_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations drm_debugfs_fops = { .owner = THIS_MODULE, .open = drm_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * drm_debugfs_gpuva_info - dump the given DRM GPU VA space * @m: pointer to the &seq_file to write * @gpuvm: the &drm_gpuvm representing the GPU VA space * * Dumps the GPU VA mappings of a given DRM GPU VA manager. * * For each DRM GPU VA space drivers should call this function from their * &drm_info_list's show callback. * * Returns: 0 on success, -ENODEV if the &gpuvm is not initialized */ int drm_debugfs_gpuva_info(struct seq_file *m, struct drm_gpuvm *gpuvm) { struct drm_gpuva *va, *kva = &gpuvm->kernel_alloc_node; if (!gpuvm->name) return -ENODEV; seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n", gpuvm->name, gpuvm->mm_start, gpuvm->mm_start + gpuvm->mm_range); seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n", kva->va.addr, kva->va.addr + kva->va.range); seq_puts(m, "\n"); seq_puts(m, " VAs | start | range | end | object | object offset\n"); seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n"); drm_gpuvm_for_each_va(va, gpuvm) { if (unlikely(va == kva)) continue; seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx\n", va->va.addr, va->va.range, va->va.addr + va->va.range, (u64)(uintptr_t)va->gem.obj, va->gem.offset); } return 0; } EXPORT_SYMBOL(drm_debugfs_gpuva_info); /** * drm_debugfs_create_files - Initialize a given set of debugfs files for DRM * minor * @files: The array of files to create * @count: The number of files given * @root: DRI debugfs dir entry. 
* @minor: device minor number * * Create a given set of debugfs files represented by an array of * &struct drm_info_list in the given root directory. These files will be removed * automatically on drm_debugfs_dev_fini(). */ void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct drm_info_node *tmp; int i; for (i = 0; i < count; i++) { u32 features = files[i].driver_features; if (features && !drm_core_check_all_features(dev, features)) continue; tmp = drmm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL); if (tmp == NULL) continue; tmp->minor = minor; tmp->dent = debugfs_create_file(files[i].name, 0444, root, tmp, &drm_debugfs_fops); tmp->info_ent = &files[i]; } } EXPORT_SYMBOL(drm_debugfs_create_files); int drm_debugfs_remove_files(const struct drm_info_list *files, int count, struct dentry *root, struct drm_minor *minor) { int i; for (i = 0; i < count; i++) { struct dentry *dent = debugfs_lookup(files[i].name, root); if (!dent) continue; drmm_kfree(minor->dev, d_inode(dent)->i_private); debugfs_remove(dent); } return 0; } EXPORT_SYMBOL(drm_debugfs_remove_files); /** * drm_debugfs_dev_init - create debugfs directory for the device * @dev: the device which we want to create the directory for * @root: the parent directory depending on the device type * * Creates the debugfs directory for the device under the given root directory. */ void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) { dev->debugfs_root = debugfs_create_dir(dev->unique, root); } /** * drm_debugfs_dev_fini - cleanup debugfs directory * @dev: the device to cleanup the debugfs stuff * * Remove the debugfs directory, might be called multiple times. */ void drm_debugfs_dev_fini(struct drm_device *dev) { debugfs_remove_recursive(dev->debugfs_root); dev->debugfs_root = NULL; } void drm_debugfs_dev_register(struct drm_device *dev) { drm_debugfs_add_files(dev, drm_debugfs_list, DRM_DEBUGFS_ENTRIES); if (drm_core_check_feature(dev, DRIVER_MODESET)) { drm_framebuffer_debugfs_init(dev); drm_client_debugfs_init(dev); } if (drm_drv_uses_atomic_modeset(dev)) drm_atomic_debugfs_init(dev); } int drm_debugfs_register(struct drm_minor *minor, int minor_id, struct dentry *root) { struct drm_device *dev = minor->dev; char name[64]; sprintf(name, "%d", minor_id); minor->debugfs_symlink = debugfs_create_symlink(name, root, dev->unique); /* TODO: Only for compatibility with drivers */ minor->debugfs_root = dev->debugfs_root; if (dev->driver->debugfs_init && dev->render != minor) dev->driver->debugfs_init(minor); return 0; } void drm_debugfs_unregister(struct drm_minor *minor) { debugfs_remove(minor->debugfs_symlink); minor->debugfs_symlink = NULL; } /** * drm_debugfs_add_file - Add a given file to the DRM device debugfs file list * @dev: drm device for the ioctl * @name: debugfs file name * @show: show callback * @data: driver-private data, should not be device-specific * * Add a given file entry to the DRM device debugfs file list to be created on * drm_debugfs_init. 
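 *
 * A minimal usage sketch, following the same show-callback pattern as
 * drm_name_info() above (the mydrv_* names are made up for illustration):
 *
 *	static int mydrv_status_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_debugfs_entry *entry = m->private;
 *
 *		seq_printf(m, "bound to %s\n", dev_name(entry->dev->dev));
 *		return 0;
 *	}
 *
 *	drm_debugfs_add_file(drm_dev, "mydrv_status", mydrv_status_show, NULL);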
*/ void drm_debugfs_add_file(struct drm_device *dev, const char *name, int (*show)(struct seq_file*, void*), void *data) { struct drm_debugfs_entry *entry = drmm_kzalloc(dev, sizeof(*entry), GFP_KERNEL); if (!entry) return; entry->file.name = name; entry->file.show = show; entry->file.data = data; entry->dev = dev; debugfs_create_file(name, 0444, dev->debugfs_root, entry, &drm_debugfs_entry_fops); } EXPORT_SYMBOL(drm_debugfs_add_file); /** * drm_debugfs_add_files - Add an array of files to the DRM device debugfs file list * @dev: drm device for the ioctl * @files: The array of files to create * @count: The number of files given * * Add a given set of debugfs files represented by an array of * &struct drm_debugfs_info in the DRM device debugfs file list. */ void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count) { int i; for (i = 0; i < count; i++) drm_debugfs_add_file(dev, files[i].name, files[i].show, files[i].data); } EXPORT_SYMBOL(drm_debugfs_add_files); static int connector_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; seq_printf(m, "%s\n", drm_get_connector_force_name(connector->force)); return 0; } static int connector_open(struct inode *inode, struct file *file) { struct drm_connector *dev = inode->i_private; return single_open(file, connector_show, dev); } static ssize_t connector_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_connector *connector = m->private; char buf[12]; if (len > sizeof(buf) - 1) return -EINVAL; if (copy_from_user(buf, ubuf, len)) return -EFAULT; buf[len] = '\0'; if (sysfs_streq(buf, "on")) connector->force = DRM_FORCE_ON; else if (sysfs_streq(buf, "digital")) connector->force = DRM_FORCE_ON_DIGITAL; else if (sysfs_streq(buf, "off")) connector->force = DRM_FORCE_OFF; else if (sysfs_streq(buf, "unspecified")) connector->force = DRM_FORCE_UNSPECIFIED; else return -EINVAL; return len; } static int edid_show(struct seq_file *m, void *data) { return drm_edid_override_show(m->private, m); } static int edid_open(struct inode *inode, struct file *file) { struct drm_connector *dev = inode->i_private; return single_open(file, edid_show, dev); } static ssize_t edid_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; struct drm_connector *connector = m->private; char *buf; int ret; buf = memdup_user(ubuf, len); if (IS_ERR(buf)) return PTR_ERR(buf); if (len == 5 && !strncmp(buf, "reset", 5)) ret = drm_edid_override_reset(connector); else ret = drm_edid_override_set(connector, buf, len); kfree(buf); return ret ? ret : len; } /* * Returns the min and max vrr vfreq through the connector's debugfs file. * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range */ static int vrr_range_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; if (connector->status != connector_status_connected) return -ENODEV; seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq); seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq); return 0; } DEFINE_SHOW_ATTRIBUTE(vrr_range); /* * Returns Connector's max supported bpc through debugfs file. 
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc */ static int output_bpc_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; if (connector->status != connector_status_connected) return -ENODEV; seq_printf(m, "Maximum: %u\n", connector->display_info.bpc); return 0; } DEFINE_SHOW_ATTRIBUTE(output_bpc); static const struct file_operations drm_edid_fops = { .owner = THIS_MODULE, .open = edid_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = edid_write }; static const struct file_operations drm_connector_fops = { .owner = THIS_MODULE, .open = connector_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = connector_write }; static ssize_t audio_infoframe_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct drm_connector_hdmi_infoframe *infoframe; struct drm_connector *connector; union hdmi_infoframe *frame; u8 buf[HDMI_INFOFRAME_SIZE(AUDIO)]; ssize_t len = 0; connector = filp->private_data; mutex_lock(&connector->hdmi.infoframes.lock); infoframe = &connector->hdmi.infoframes.audio; if (!infoframe->set) goto out; frame = &infoframe->data; len = hdmi_infoframe_pack(frame, buf, sizeof(buf)); if (len < 0) goto out; len = simple_read_from_buffer(ubuf, count, ppos, buf, len); out: mutex_unlock(&connector->hdmi.infoframes.lock); return len; } static const struct file_operations audio_infoframe_fops = { .owner = THIS_MODULE, .open = simple_open, .read = audio_infoframe_read, }; static int create_hdmi_audio_infoframe_file(struct drm_connector *connector, struct dentry *parent) { struct dentry *file; file = debugfs_create_file("audio", 0400, parent, connector, &audio_infoframe_fops); if (IS_ERR(file)) return PTR_ERR(file); return 0; } #define DEFINE_INFOFRAME_FILE(_f) \ static ssize_t _f##_read_infoframe(struct file *filp, \ char __user *ubuf, \ size_t count, \ loff_t *ppos) \ { \ struct drm_connector_hdmi_infoframe *infoframe; \ struct drm_connector_state *conn_state; \ struct drm_connector *connector; \ union hdmi_infoframe *frame; \ struct drm_device *dev; \ u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; \ ssize_t len = 0; \ \ connector = filp->private_data; \ dev = connector->dev; \ \ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); \ \ conn_state = connector->state; \ infoframe = &conn_state->hdmi.infoframes._f; \ if (!infoframe->set) \ goto out; \ \ frame = &infoframe->data; \ len = hdmi_infoframe_pack(frame, buf, sizeof(buf)); \ if (len < 0) \ goto out; \ \ len = simple_read_from_buffer(ubuf, count, ppos, buf, len); \ \ out: \ drm_modeset_unlock(&dev->mode_config.connection_mutex); \ return len; \ } \ \ static const struct file_operations _f##_infoframe_fops = { \ .owner = THIS_MODULE, \ .open = simple_open, \ .read = _f##_read_infoframe, \ }; \ \ static int create_hdmi_## _f ## _infoframe_file(struct drm_connector *connector, \ struct dentry *parent) \ { \ struct dentry *file; \ \ file = debugfs_create_file(#_f, 0400, parent, connector, &_f ## _infoframe_fops); \ if (IS_ERR(file)) \ return PTR_ERR(file); \ \ return 0; \ } DEFINE_INFOFRAME_FILE(avi); DEFINE_INFOFRAME_FILE(hdmi); DEFINE_INFOFRAME_FILE(hdr_drm); DEFINE_INFOFRAME_FILE(spd); static int create_hdmi_infoframe_files(struct drm_connector *connector, struct dentry *parent) { int ret; ret = create_hdmi_audio_infoframe_file(connector, parent); if (ret) return ret; ret = create_hdmi_avi_infoframe_file(connector, parent); if (ret) return ret; ret = create_hdmi_hdmi_infoframe_file(connector, parent); if (ret) return 
ret; ret = create_hdmi_hdr_drm_infoframe_file(connector, parent); if (ret) return ret; ret = create_hdmi_spd_infoframe_file(connector, parent); if (ret) return ret; return 0; } static void hdmi_debugfs_add(struct drm_connector *connector) { struct dentry *dir; if (!(connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) return; dir = debugfs_create_dir("infoframes", connector->debugfs_entry); if (IS_ERR(dir)) return; create_hdmi_infoframe_files(connector, dir); } void drm_debugfs_connector_add(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct dentry *root; if (!dev->debugfs_root) return; root = debugfs_create_dir(connector->name, dev->debugfs_root); connector->debugfs_entry = root; /* force */ debugfs_create_file("force", 0644, root, connector, &drm_connector_fops); /* edid */ debugfs_create_file("edid_override", 0644, root, connector, &drm_edid_fops); /* vrr range */ debugfs_create_file("vrr_range", 0444, root, connector, &vrr_range_fops); /* max bpc */ debugfs_create_file("output_bpc", 0444, root, connector, &output_bpc_fops); hdmi_debugfs_add(connector); if (connector->funcs->debugfs_init) connector->funcs->debugfs_init(connector, root); } void drm_debugfs_connector_remove(struct drm_connector *connector) { if (!connector->debugfs_entry) return; debugfs_remove_recursive(connector->debugfs_entry); connector->debugfs_entry = NULL; } void drm_debugfs_crtc_add(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct dentry *root; char *name; name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index); if (!name) return; root = debugfs_create_dir(name, dev->debugfs_root); kfree(name); crtc->debugfs_entry = root; drm_debugfs_crtc_crc_add(crtc); } void drm_debugfs_crtc_remove(struct drm_crtc *crtc) { debugfs_remove_recursive(crtc->debugfs_entry); crtc->debugfs_entry = NULL; } static int bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); struct drm_bridge *bridge; unsigned int idx = 0; drm_for_each_bridge_in_chain(encoder, bridge) { drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs); drm_printf(&p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); if (bridge->of_node) drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); drm_printf(&p, "\tops: [0x%x]", bridge->ops); if (bridge->ops & DRM_BRIDGE_OP_DETECT) drm_puts(&p, " detect"); if (bridge->ops & DRM_BRIDGE_OP_EDID) drm_puts(&p, " edid"); if (bridge->ops & DRM_BRIDGE_OP_HPD) drm_puts(&p, " hpd"); if (bridge->ops & DRM_BRIDGE_OP_MODES) drm_puts(&p, " modes"); if (bridge->ops & DRM_BRIDGE_OP_HDMI) drm_puts(&p, " hdmi"); drm_puts(&p, "\n"); } return 0; } DEFINE_SHOW_ATTRIBUTE(bridges); void drm_debugfs_encoder_add(struct drm_encoder *encoder) { struct drm_minor *minor = encoder->dev->primary; struct dentry *root; char *name; name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index); if (!name) return; root = debugfs_create_dir(name, minor->debugfs_root); kfree(name); encoder->debugfs_entry = root; /* bridges list */ debugfs_create_file("bridges", 0444, root, encoder, &bridges_fops); if (encoder->funcs && encoder->funcs->debugfs_init) encoder->funcs->debugfs_init(encoder, root); } void drm_debugfs_encoder_remove(struct drm_encoder *encoder) { debugfs_remove_recursive(encoder->debugfs_entry); encoder->debugfs_entry = NULL; } |
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011 Florian Westphal <fw@strlen.de> * * based on fib_frontend.c; Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <net/inet_dscp.h> #include <linux/ip.h> #include <net/ip.h> #include <net/ip_fib.h> #include <net/route.h> #include <linux/netfilter/xt_rpfilter.h> #include <linux/netfilter/x_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match"); /* don't try to find route from mcast/bcast/zeronet */ static __be32 rpfilter_get_saddr(__be32 addr) { if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) || ipv4_is_zeronet(addr)) return 0; return addr; } static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4, const struct net_device *dev, u8 flags) { struct fib_result res; if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE)) return false; if (res.type != RTN_UNICAST) { if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL)) return false; } return fib_info_nh_uses_dev(res.fi, dev) || flags & XT_RPFILTER_LOOSE; } static bool rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in) { return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; } static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_rpfilter_info *info; const struct iphdr *iph; struct flowi4 flow; bool invert; info = par->matchinfo; invert = info->flags & XT_RPFILTER_INVERT; if (rpfilter_is_loopback(skb, xt_in(par))) return true ^ invert; iph = ip_hdr(skb); if (ipv4_is_zeronet(iph->saddr)) { if (ipv4_is_lbcast(iph->daddr) || ipv4_is_local_multicast(iph->daddr)) return true ^ invert; } memset(&flow, 0, sizeof(flow)); flow.flowi4_iif = LOOPBACK_IFINDEX; flow.daddr = iph->saddr; flow.saddr = rpfilter_get_saddr(iph->daddr); flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ?
skb->mark : 0; flow.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)); flow.flowi4_scope = RT_SCOPE_UNIVERSE; flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par)); flow.flowi4_uid = sock_net_uid(xt_net(par), NULL); return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; } static int rpfilter_check(const struct xt_mtchk_param *par) { const struct xt_rpfilter_info *info = par->matchinfo; unsigned int options = ~XT_RPFILTER_OPTION_MASK; if (info->flags & options) { pr_info_ratelimited("unknown options\n"); return -EINVAL; } if (strcmp(par->table, "mangle") != 0 && strcmp(par->table, "raw") != 0) { pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n", par->table); return -EINVAL; } return 0; } static struct xt_match rpfilter_mt_reg __read_mostly = { .name = "rpfilter", .family = NFPROTO_IPV4, .checkentry = rpfilter_check, .match = rpfilter_mt, .matchsize = sizeof(struct xt_rpfilter_info), .hooks = (1 << NF_INET_PRE_ROUTING), .me = THIS_MODULE }; static int __init rpfilter_mt_init(void) { return xt_register_match(&rpfilter_mt_reg); } static void __exit rpfilter_mt_exit(void) { xt_unregister_match(&rpfilter_mt_reg); } module_init(rpfilter_mt_init); module_exit(rpfilter_mt_exit); |
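/*
 * Context for the match above: from userspace this is typically used as an
 * anti-spoofing rule in the raw or mangle table, e.g.:
 *
 *	iptables -t raw -A PREROUTING -m rpfilter --invert -j DROP
 *
 * which drops packets whose source address fails the reverse path lookup in
 * rpfilter_mt().  The --loose and --accept-local options correspond to the
 * XT_RPFILTER_LOOSE and XT_RPFILTER_ACCEPT_LOCAL flags checked in
 * rpfilter_lookup_reverse().
 */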
// SPDX-License-Identifier: GPL-2.0-or-later /* * Force feedback support for DragonRise Inc. game controllers * * From what I have gathered, these devices are mass produced in China and are * distributed under several vendors. They often share the same design as * the original PlayStation DualShock controller. * * 0079:0006 "DragonRise Inc. Generic USB Joystick " * - tested with a Tesun USB-703 game controller. * * Copyright (c) 2009 Richard Walmsley <richwalm@gmail.com> */ /* */ #include <linux/input.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #ifdef CONFIG_DRAGONRISE_FF struct drff_device { struct hid_report *report; }; static int drff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct drff_device *drff = data; int strong, weak; strong = effect->u.rumble.strong_magnitude; weak = effect->u.rumble.weak_magnitude; dbg_hid("called with 0x%04x 0x%04x", strong, weak); if (strong || weak) { strong = strong * 0xff / 0xffff; weak = weak * 0xff / 0xffff; /* While reverse engineering this device, I found that when this value is set, it causes the strong rumble to function at a near maximum speed, so we'll bypass it.
*/ if (weak == 0x0a) weak = 0x0b; drff->report->field[0]->value[0] = 0x51; drff->report->field[0]->value[1] = 0x00; drff->report->field[0]->value[2] = weak; drff->report->field[0]->value[4] = strong; hid_hw_request(hid, drff->report, HID_REQ_SET_REPORT); drff->report->field[0]->value[0] = 0xfa; drff->report->field[0]->value[1] = 0xfe; } else { drff->report->field[0]->value[0] = 0xf3; drff->report->field[0]->value[1] = 0x00; } drff->report->field[0]->value[2] = 0x00; drff->report->field[0]->value[4] = 0x00; dbg_hid("running with 0x%02x 0x%02x", strong, weak); hid_hw_request(hid, drff->report, HID_REQ_SET_REPORT); return 0; } static int drff_init(struct hid_device *hid) { struct drff_device *drff; struct hid_report *report; struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev; int error; if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_first_entry(&hid->inputs, struct hid_input, list); dev = hidinput->input; if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; } report = list_first_entry(report_list, struct hid_report, list); if (report->maxfield < 1) { hid_err(hid, "no fields in the report\n"); return -ENODEV; } if (report->field[0]->report_count < 7) { hid_err(hid, "not enough values in the field\n"); return -ENODEV; } drff = kzalloc(sizeof(struct drff_device), GFP_KERNEL); if (!drff) return -ENOMEM; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, drff, drff_play); if (error) { kfree(drff); return error; } drff->report = report; drff->report->field[0]->value[0] = 0xf3; drff->report->field[0]->value[1] = 0x00; drff->report->field[0]->value[2] = 0x00; drff->report->field[0]->value[3] = 0x00; drff->report->field[0]->value[4] = 0x00; drff->report->field[0]->value[5] = 0x00; drff->report->field[0]->value[6] = 0x00; hid_hw_request(hid, drff->report, HID_REQ_SET_REPORT); hid_info(hid, "Force Feedback for DragonRise Inc. " "game controllers by Richard Walmsley <richwalm@gmail.com>\n"); return 0; } #else static inline int drff_init(struct hid_device *hid) { return 0; } #endif /* * The original descriptor of joystick with PID 0x0011, represented by DVTech PC * JS19. It seems both copied from another device and a result of confusion * either about the specification or about the program used to create the * descriptor. In any case, it's a wonder it works on Windows. 
* * Usage Page (Desktop), ; Generic desktop controls (01h) * Usage (Joystick), ; Joystick (04h, application collection) * Collection (Application), * Collection (Logical), * Report Size (8), * Report Count (5), * Logical Minimum (0), * Logical Maximum (255), * Physical Minimum (0), * Physical Maximum (255), * Usage (X), ; X (30h, dynamic value) * Usage (X), ; X (30h, dynamic value) * Usage (X), ; X (30h, dynamic value) * Usage (X), ; X (30h, dynamic value) * Usage (Y), ; Y (31h, dynamic value) * Input (Variable), * Report Size (4), * Report Count (1), * Logical Maximum (7), * Physical Maximum (315), * Unit (Degrees), * Usage (00h), * Input (Variable, Null State), * Unit, * Report Size (1), * Report Count (10), * Logical Maximum (1), * Physical Maximum (1), * Usage Page (Button), ; Button (09h) * Usage Minimum (01h), * Usage Maximum (0Ah), * Input (Variable), * Usage Page (FF00h), ; FF00h, vendor-defined * Report Size (1), * Report Count (10), * Logical Maximum (1), * Physical Maximum (1), * Usage (01h), * Input (Variable), * End Collection, * Collection (Logical), * Report Size (8), * Report Count (4), * Physical Maximum (255), * Logical Maximum (255), * Usage (02h), * Output (Variable), * End Collection, * End Collection */ /* Size of the original descriptor of the PID 0x0011 joystick */ #define PID0011_RDESC_ORIG_SIZE 101 /* Fixed report descriptor for PID 0x011 joystick */ static const __u8 pid0011_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x14, /* Logical Minimum (0), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x02, /* Report Count (2), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x01, /* Input (Constant), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x0A, /* Report Count (10), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0A, /* Usage Maximum (0Ah), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0A, /* Report Count (10), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { case 0x0011: if (*rsize == PID0011_RDESC_ORIG_SIZE) { *rsize = sizeof(pid0011_rdesc_fixed); return pid0011_rdesc_fixed; } break; } return rdesc; } #define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c)) #define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c)) static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { switch (usage->hid) { /* * revert to the old hid-input behavior where axes * can be randomly assigned when hid->usage is reused. */ case HID_GD_X: case HID_GD_Y: case HID_GD_Z: case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: if (field->flags & HID_MAIN_ITEM_RELATIVE) map_rel(usage->hid & 0xf); else map_abs(usage->hid & 0xf); return 1; } return 0; } static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; dev_dbg(&hdev->dev, "DragonRise Inc. 
HID hardware probe..."); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } switch (hdev->product) { case 0x0006: ret = drff_init(hdev); if (ret) { dev_err(&hdev->dev, "force feedback init failed\n"); hid_hw_stop(hdev); goto err; } break; } return 0; err: return ret; } static const struct hid_device_id dr_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006), }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011), }, { } }; MODULE_DEVICE_TABLE(hid, dr_devices); static struct hid_driver dr_driver = { .name = "dragonrise", .id_table = dr_devices, .report_fixup = dr_report_fixup, .probe = dr_probe, .input_mapping = dr_input_mapping, }; module_hid_driver(dr_driver); MODULE_DESCRIPTION("Force feedback support for DragonRise Inc. game controllers"); MODULE_LICENSE("GPL");
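/*
 * Illustrative sketch (not part of the driver): drff_play() above scales the
 * 16-bit ff_effect rumble magnitudes down to the single byte the device
 * expects. A hypothetical helper doing the same arithmetic would look like
 * this; the name drff_scale_magnitude is invented for illustration only.
 */
static inline u8 drff_scale_magnitude(u16 magnitude)
{
	/* 0xffff maps to 0xff, 0x8000 maps to 0x7f, 0 maps to 0 */
	return magnitude * 0xff / 0xffff;
}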
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * Copyright (c) 2016-2018 Christoph Hellwig. * All Rights Reserved.
*/ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_bmap_btree.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_errortag.h" #include "xfs_error.h" #include "xfs_trans.h" #include "xfs_trans_space.h" #include "xfs_inode_item.h" #include "xfs_iomap.h" #include "xfs_trace.h" #include "xfs_quota.h" #include "xfs_rtgroup.h" #include "xfs_dquot_item.h" #include "xfs_dquot.h" #include "xfs_reflink.h" #include "xfs_health.h" #include "xfs_rtbitmap.h" #include "xfs_icache.h" #include "xfs_zone_alloc.h" #define XFS_ALLOC_ALIGN(mp, off) \ (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log) static int xfs_alert_fsblock_zero( xfs_inode_t *ip, xfs_bmbt_irec_t *imap) { xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, "Access to block zero in inode %llu " "start_block: %llx start_off: %llx " "blkcnt: %llx extent-state: %x", (unsigned long long)ip->i_ino, (unsigned long long)imap->br_startblock, (unsigned long long)imap->br_startoff, (unsigned long long)imap->br_blockcount, imap->br_state); xfs_bmap_mark_sick(ip, XFS_DATA_FORK); return -EFSCORRUPTED; } u64 xfs_iomap_inode_sequence( struct xfs_inode *ip, u16 iomap_flags) { u64 cookie = 0; if (iomap_flags & IOMAP_F_XATTR) return READ_ONCE(ip->i_af.if_seq); if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp) cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32; return cookie | READ_ONCE(ip->i_df.if_seq); } /* * Check that the iomap passed to us is still valid for the given offset and * length. */ static bool xfs_iomap_valid( struct inode *inode, const struct iomap *iomap) { struct xfs_inode *ip = XFS_I(inode); if (iomap->validity_cookie != xfs_iomap_inode_sequence(ip, iomap->flags)) { trace_xfs_iomap_invalid(ip, iomap); return false; } XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS); return true; } static const struct iomap_folio_ops xfs_iomap_folio_ops = { .iomap_valid = xfs_iomap_valid, }; int xfs_bmbt_to_iomap( struct xfs_inode *ip, struct iomap *iomap, struct xfs_bmbt_irec *imap, unsigned int mapping_flags, u16 iomap_flags, u64 sequence_cookie) { struct xfs_mount *mp = ip->i_mount; struct xfs_buftarg *target = xfs_inode_buftarg(ip); if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) { xfs_bmap_mark_sick(ip, XFS_DATA_FORK); return xfs_alert_fsblock_zero(ip, imap); } if (imap->br_startblock == HOLESTARTBLOCK) { iomap->addr = IOMAP_NULL_ADDR; iomap->type = IOMAP_HOLE; } else if (imap->br_startblock == DELAYSTARTBLOCK || isnullstartblock(imap->br_startblock)) { iomap->addr = IOMAP_NULL_ADDR; iomap->type = IOMAP_DELALLOC; } else { xfs_daddr_t daddr = xfs_fsb_to_db(ip, imap->br_startblock); iomap->addr = BBTOB(daddr); if (mapping_flags & IOMAP_DAX) iomap->addr += target->bt_dax_part_off; if (imap->br_state == XFS_EXT_UNWRITTEN) iomap->type = IOMAP_UNWRITTEN; else iomap->type = IOMAP_MAPPED; /* * Mark iomaps starting at the first sector of a RTG as merge * boundary so that each I/O completions is contained to a * single RTG. 
*/ if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(mp) && xfs_rtbno_is_group_start(mp, imap->br_startblock)) iomap->flags |= IOMAP_F_BOUNDARY; } iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); if (mapping_flags & IOMAP_DAX) iomap->dax_dev = target->bt_daxdev; else iomap->bdev = target->bt_bdev; iomap->flags = iomap_flags; if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) iomap->flags |= IOMAP_F_DIRTY; iomap->validity_cookie = sequence_cookie; iomap->folio_ops = &xfs_iomap_folio_ops; return 0; } static void xfs_hole_to_iomap( struct xfs_inode *ip, struct iomap *iomap, xfs_fileoff_t offset_fsb, xfs_fileoff_t end_fsb) { struct xfs_buftarg *target = xfs_inode_buftarg(ip); iomap->addr = IOMAP_NULL_ADDR; iomap->type = IOMAP_HOLE; iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb); iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb); iomap->bdev = target->bt_bdev; iomap->dax_dev = target->bt_daxdev; } static inline xfs_fileoff_t xfs_iomap_end_fsb( struct xfs_mount *mp, loff_t offset, loff_t count) { ASSERT(offset <= mp->m_super->s_maxbytes); return min(XFS_B_TO_FSB(mp, offset + count), XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); } static xfs_extlen_t xfs_eof_alignment( struct xfs_inode *ip) { struct xfs_mount *mp = ip->i_mount; xfs_extlen_t align = 0; if (!XFS_IS_REALTIME_INODE(ip)) { /* * Round up the allocation request to a stripe unit * (m_dalign) boundary if the file size is >= stripe unit * size, and we are allocating past the allocation eof. * * If mounted with the "-o swalloc" option the alignment is * increased from the strip unit size to the stripe width. */ if (mp->m_swidth && xfs_has_swalloc(mp)) align = mp->m_swidth; else if (mp->m_dalign) align = mp->m_dalign; if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align)) align = 0; } return align; } /* * Check if last_fsb is outside the last extent, and if so grow it to the next * stripe unit boundary. */ xfs_fileoff_t xfs_iomap_eof_align_last_fsb( struct xfs_inode *ip, xfs_fileoff_t end_fsb) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); xfs_extlen_t extsz = xfs_get_extsz_hint(ip); xfs_extlen_t align = xfs_eof_alignment(ip); struct xfs_bmbt_irec irec; struct xfs_iext_cursor icur; ASSERT(!xfs_need_iread_extents(ifp)); /* * Always round up the allocation request to the extent hint boundary. 
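 * As an illustrative example (values are hypothetical, not from this file): with a 4-block stripe alignment and a 16-block extent size hint, align becomes roundup_64(4, 16) = 16, so an end_fsb of 13 is rounded up to 16, provided the rounded offset does not land inside the last cached extent.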
*/ if (extsz) { if (align) align = roundup_64(align, extsz); else align = extsz; } if (align) { xfs_fileoff_t aligned_end_fsb = roundup_64(end_fsb, align); xfs_iext_last(ifp, &icur); if (!xfs_iext_get_extent(ifp, &icur, &irec) || aligned_end_fsb >= irec.br_startoff + irec.br_blockcount) return aligned_end_fsb; } return end_fsb; } int xfs_iomap_write_direct( struct xfs_inode *ip, xfs_fileoff_t offset_fsb, xfs_fileoff_t count_fsb, unsigned int flags, struct xfs_bmbt_irec *imap, u64 *seq) { struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; xfs_filblks_t resaligned; int nimaps; unsigned int dblocks, rblocks; bool force = false; int error; int bmapi_flags = XFS_BMAPI_PREALLOC; int nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT; ASSERT(count_fsb > 0); resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, xfs_get_extsz_hint(ip)); if (unlikely(XFS_IS_REALTIME_INODE(ip))) { dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0); rblocks = resaligned; } else { dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); rblocks = 0; } error = xfs_qm_dqattach(ip); if (error) return error; /* * For DAX, we do not allocate unwritten extents, but instead we zero * the block before we commit the transaction. Ideally we'd like to do * this outside the transaction context, but if we commit and then crash * we may not have zeroed the blocks and this will be exposed on * recovery of the allocation. Hence we must zero before commit. * * Further, if we are mapping unwritten extents here, we need to zero * and convert them to written so that we don't need an unwritten extent * callback for DAX. This also means that we need to be able to dip into * the reserve block pool for bmbt block allocation if there is no space * left but we need to do unwritten extent conversion. */ if (flags & IOMAP_DAX) { bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO; if (imap->br_state == XFS_EXT_UNWRITTEN) { force = true; nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT; dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; } } error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, rblocks, force, &tp); if (error) return error; error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, nr_exts); if (error) goto out_trans_cancel; /* * From this point onwards we overwrite the imap pointer that the * caller gave to us. 
*/ nimaps = 1; error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0, imap, &nimaps); if (error) goto out_trans_cancel; /* * Complete the transaction */ error = xfs_trans_commit(tp); if (error) goto out_unlock; if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) { xfs_bmap_mark_sick(ip, XFS_DATA_FORK); error = xfs_alert_fsblock_zero(ip, imap); } out_unlock: *seq = xfs_iomap_inode_sequence(ip, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; out_trans_cancel: xfs_trans_cancel(tp); goto out_unlock; } STATIC bool xfs_quota_need_throttle( struct xfs_inode *ip, xfs_dqtype_t type, xfs_fsblock_t alloc_blocks) { struct xfs_dquot *dq = xfs_inode_dquot(ip, type); struct xfs_dquot_res *res; struct xfs_dquot_pre *pre; if (!dq || !xfs_this_quota_on(ip->i_mount, type)) return false; if (XFS_IS_REALTIME_INODE(ip)) { res = &dq->q_rtb; pre = &dq->q_rtb_prealloc; } else { res = &dq->q_blk; pre = &dq->q_blk_prealloc; } /* no hi watermark, no throttle */ if (!pre->q_prealloc_hi_wmark) return false; /* under the lo watermark, no throttle */ if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark) return false; return true; } STATIC void xfs_quota_calc_throttle( struct xfs_inode *ip, xfs_dqtype_t type, xfs_fsblock_t *qblocks, int *qshift, int64_t *qfreesp) { struct xfs_dquot *dq = xfs_inode_dquot(ip, type); struct xfs_dquot_res *res; struct xfs_dquot_pre *pre; int64_t freesp; int shift = 0; if (!dq) { res = NULL; pre = NULL; } else if (XFS_IS_REALTIME_INODE(ip)) { res = &dq->q_rtb; pre = &dq->q_rtb_prealloc; } else { res = &dq->q_blk; pre = &dq->q_blk_prealloc; } /* no dq, or over hi wmark, squash the prealloc completely */ if (!res || res->reserved >= pre->q_prealloc_hi_wmark) { *qblocks = 0; *qfreesp = 0; return; } freesp = pre->q_prealloc_hi_wmark - res->reserved; if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) { shift = 2; if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT]) shift += 2; if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT]) shift += 2; } if (freesp < *qfreesp) *qfreesp = freesp; /* only overwrite the throttle values if we are more aggressive */ if ((freesp >> shift) < (*qblocks >> *qshift)) { *qblocks = freesp; *qshift = shift; } } static int64_t xfs_iomap_freesp( struct xfs_mount *mp, unsigned int idx, uint64_t low_space[XFS_LOWSP_MAX], int *shift) { int64_t freesp; freesp = xfs_estimate_freecounter(mp, idx); if (freesp < low_space[XFS_LOWSP_5_PCNT]) { *shift = 2; if (freesp < low_space[XFS_LOWSP_4_PCNT]) (*shift)++; if (freesp < low_space[XFS_LOWSP_3_PCNT]) (*shift)++; if (freesp < low_space[XFS_LOWSP_2_PCNT]) (*shift)++; if (freesp < low_space[XFS_LOWSP_1_PCNT]) (*shift)++; } return freesp; } /* * If we don't have a user specified preallocation size, dynamically increase * the preallocation size as the size of the file grows. Cap the maximum size * at a single extent or less if the filesystem is near full. The closer the * filesystem is to being full, the smaller the maximum preallocation. 
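 * As a rough illustration (numbers are hypothetical): if the extents preceding the write total 48 blocks, the initial guess below is twice that, 96 blocks; a throttle shift of 2 cuts it to 24, rounddown_pow_of_two() turns that into 16, and the result is never allowed below m_allocsize_blocks.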
*/ STATIC xfs_fsblock_t xfs_iomap_prealloc_size( struct xfs_inode *ip, int whichfork, loff_t offset, loff_t count, struct xfs_iext_cursor *icur) { struct xfs_iext_cursor ncur = *icur; struct xfs_bmbt_irec prev, got; struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); int64_t freesp; xfs_fsblock_t qblocks; xfs_fsblock_t alloc_blocks = 0; xfs_extlen_t plen; int shift = 0; int qshift = 0; /* * As an exception we don't do any preallocation at all if the file is * smaller than the minimum preallocation and we are using the default * dynamic preallocation scheme, as it is likely this is the only write * to the file that is going to be done. */ if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)) return 0; /* * Use the minimum preallocation size for small files or if we are * writing right after a hole. */ if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || !xfs_iext_prev_extent(ifp, &ncur, &prev) || prev.br_startoff + prev.br_blockcount < offset_fsb) return mp->m_allocsize_blocks; /* * Take the size of the preceding data extents as the basis for the * preallocation size. Note that we don't care if the previous extents * are written or not. */ plen = prev.br_blockcount; while (xfs_iext_prev_extent(ifp, &ncur, &got)) { if (plen > XFS_MAX_BMBT_EXTLEN / 2 || isnullstartblock(got.br_startblock) || got.br_startoff + got.br_blockcount != prev.br_startoff || got.br_startblock + got.br_blockcount != prev.br_startblock) break; plen += got.br_blockcount; prev = got; } /* * If the size of the extents is greater than half the maximum extent * length, then use the current offset as the basis. This ensures that * for large files the preallocation size always extends to * XFS_BMBT_MAX_EXTLEN rather than falling short due to things like stripe * unit/width alignment of real extents. */ alloc_blocks = plen * 2; if (alloc_blocks > XFS_MAX_BMBT_EXTLEN) alloc_blocks = XFS_B_TO_FSB(mp, offset); qblocks = alloc_blocks; /* * XFS_BMBT_MAX_EXTLEN is not a power of two value but we round the prealloc * down to the nearest power of two value after throttling. To prevent * the round down from unconditionally reducing the maximum supported * prealloc size, we round up first, apply appropriate throttling, round * down and cap the value to XFS_BMBT_MAX_EXTLEN. */ alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN), alloc_blocks); if (unlikely(XFS_IS_REALTIME_INODE(ip))) freesp = xfs_rtbxlen_to_blen(mp, xfs_iomap_freesp(mp, XC_FREE_RTEXTENTS, mp->m_low_rtexts, &shift)); else freesp = xfs_iomap_freesp(mp, XC_FREE_BLOCKS, mp->m_low_space, &shift); /* * Check each quota to cap the prealloc size, provide a shift value to * throttle with and adjust amount of available space. */ if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks)) xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift, &freesp); if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks)) xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift, &freesp); if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks)) xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift, &freesp); /* * The final prealloc size is set to the minimum of free space available * in each of the quotas and the overall filesystem. * * The shift throttle value is set to the maximum value as determined by * the global low free space values and per-quota low free space values. 
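 * For instance, if the filesystem-wide check yields shift = 2 but a quota that is closer to its limit yields qshift = 4, the larger value wins below and alloc_blocks is reduced by a factor of 16.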
*/ alloc_blocks = min(alloc_blocks, qblocks); shift = max(shift, qshift); if (shift) alloc_blocks >>= shift; /* * rounddown_pow_of_two() returns an undefined result if we pass in * alloc_blocks = 0. */ if (alloc_blocks) alloc_blocks = rounddown_pow_of_two(alloc_blocks); if (alloc_blocks > XFS_MAX_BMBT_EXTLEN) alloc_blocks = XFS_MAX_BMBT_EXTLEN; /* * If we are still trying to allocate more space than is * available, squash the prealloc hard. This can happen if we * have a large file on a small filesystem and the above * lowspace thresholds are smaller than XFS_BMBT_MAX_EXTLEN. */ while (alloc_blocks && alloc_blocks >= freesp) alloc_blocks >>= 4; if (alloc_blocks < mp->m_allocsize_blocks) alloc_blocks = mp->m_allocsize_blocks; trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift, mp->m_allocsize_blocks); return alloc_blocks; } int xfs_iomap_write_unwritten( xfs_inode_t *ip, xfs_off_t offset, xfs_off_t count, bool update_isize) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb; xfs_filblks_t count_fsb; xfs_filblks_t numblks_fsb; int nimaps; xfs_trans_t *tp; xfs_bmbt_irec_t imap; struct inode *inode = VFS_I(ip); xfs_fsize_t i_size; uint resblks; int error; trace_xfs_unwritten_convert(ip, offset, count); offset_fsb = XFS_B_TO_FSBT(mp, offset); count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); /* * Reserve enough blocks in this transaction for two complete extent * btree splits. We may be converting the middle part of an unwritten * extent and in this case we will insert two new extents in the btree * each of which could cause a full split. * * This reservation amount will be used in the first call to * xfs_bmbt_split() to select an AG with enough space to satisfy the * rest of the operation. */ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; /* Attach dquots so that bmbt splits are accounted correctly. */ error = xfs_qm_dqattach(ip); if (error) return error; do { /* * Set up a transaction to convert the range of extents * from unwritten to real. Do allocations in a loop until * we have covered the range passed in. * * Note that we can't risk to recursing back into the filesystem * here as we might be asked to write out the same inode that we * complete here and might deadlock on the iolock. */ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0, true, &tp); if (error) return error; error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, XFS_IEXT_WRITE_UNWRITTEN_CNT); if (error) goto error_on_bmapi_transaction; /* * Modify the unwritten extent state of the buffer. */ nimaps = 1; error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_CONVERT, resblks, &imap, &nimaps); if (error) goto error_on_bmapi_transaction; /* * Log the updated inode size as we go. We have to be careful * to only log it up to the actual write offset if it is * halfway into a block. 
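 * In other words, the block-aligned end of the converted range computed below is clamped back to offset + count, so a conversion that ends part way into a block never logs a size beyond the bytes actually written.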
*/ i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); if (i_size > offset + count) i_size = offset + count; if (update_isize && i_size > i_size_read(inode)) i_size_write(inode, i_size); i_size = xfs_new_eof(ip, i_size); if (i_size) { ip->i_disk_size = i_size; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) return error; if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) { xfs_bmap_mark_sick(ip, XFS_DATA_FORK); return xfs_alert_fsblock_zero(ip, &imap); } if ((numblks_fsb = imap.br_blockcount) == 0) { /* * The numblks_fsb value should always get * smaller, otherwise the loop is stuck. */ ASSERT(imap.br_blockcount); break; } offset_fsb += numblks_fsb; count_fsb -= numblks_fsb; } while (count_fsb > 0); return 0; error_on_bmapi_transaction: xfs_trans_cancel(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } static inline bool imap_needs_alloc( struct inode *inode, unsigned flags, struct xfs_bmbt_irec *imap, int nimaps) { /* don't allocate blocks when just zeroing */ if (flags & IOMAP_ZERO) return false; if (!nimaps || imap->br_startblock == HOLESTARTBLOCK || imap->br_startblock == DELAYSTARTBLOCK) return true; /* we convert unwritten extents before copying the data for DAX */ if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN) return true; return false; } static inline bool imap_needs_cow( struct xfs_inode *ip, unsigned int flags, struct xfs_bmbt_irec *imap, int nimaps) { if (!xfs_is_cow_inode(ip)) return false; /* when zeroing we don't have to COW holes or unwritten extents */ if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) { if (!nimaps || imap->br_startblock == HOLESTARTBLOCK || imap->br_state == XFS_EXT_UNWRITTEN) return false; } return true; } /* * Extents not yet cached requires exclusive access, don't block for * IOMAP_NOWAIT. * * This is basically an opencoded xfs_ilock_data_map_shared() call, but with * support for IOMAP_NOWAIT. */ static int xfs_ilock_for_iomap( struct xfs_inode *ip, unsigned flags, unsigned *lockmode) { if (flags & IOMAP_NOWAIT) { if (xfs_need_iread_extents(&ip->i_df)) return -EAGAIN; if (!xfs_ilock_nowait(ip, *lockmode)) return -EAGAIN; } else { if (xfs_need_iread_extents(&ip->i_df)) *lockmode = XFS_ILOCK_EXCL; xfs_ilock(ip, *lockmode); } return 0; } /* * Check that the imap we are going to return to the caller spans the entire * range that the caller requested for the IO. */ static bool imap_spans_range( struct xfs_bmbt_irec *imap, xfs_fileoff_t offset_fsb, xfs_fileoff_t end_fsb) { if (imap->br_startoff > offset_fsb) return false; if (imap->br_startoff + imap->br_blockcount < end_fsb) return false; return true; } static int xfs_direct_write_iomap_begin( struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_bmbt_irec imap, cmap; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length); int nimaps = 1, error = 0; bool shared = false; u16 iomap_flags = 0; unsigned int lockmode; u64 seq; ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO)); if (xfs_is_shutdown(mp)) return -EIO; /* * Writes that span EOF might trigger an IO size update on completion, * so consider them to be dirty for the purposes of O_DSYNC even if * there is no other metadata changes pending or have been made here. 
*/ if (offset + length > i_size_read(inode)) iomap_flags |= IOMAP_F_DIRTY; /* HW-offload atomics are always used in this path */ if (flags & IOMAP_ATOMIC) iomap_flags |= IOMAP_F_ATOMIC_BIO; /* * COW writes may allocate delalloc space or convert unwritten COW * extents, so we need to make sure to take the lock exclusively here. */ if (xfs_is_cow_inode(ip)) lockmode = XFS_ILOCK_EXCL; else lockmode = XFS_ILOCK_SHARED; relock: error = xfs_ilock_for_iomap(ip, flags, &lockmode); if (error) return error; /* * The reflink iflag could have changed since the earlier unlocked * check, check if it again and relock if needed. */ if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) { xfs_iunlock(ip, lockmode); lockmode = XFS_ILOCK_EXCL; goto relock; } error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, &nimaps, 0); if (error) goto out_unlock; if (imap_needs_cow(ip, flags, &imap, nimaps)) { error = -EAGAIN; if (flags & IOMAP_NOWAIT) goto out_unlock; /* may drop and re-acquire the ilock */ error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared, &lockmode, (flags & IOMAP_DIRECT) || IS_DAX(inode)); if (error) goto out_unlock; if (shared) goto out_found_cow; end_fsb = imap.br_startoff + imap.br_blockcount; length = XFS_FSB_TO_B(mp, end_fsb) - offset; } if (imap_needs_alloc(inode, flags, &imap, nimaps)) goto allocate_blocks; /* * NOWAIT and OVERWRITE I/O needs to span the entire requested I/O with * a single map so that we avoid partial IO failures due to the rest of * the I/O range not covered by this map triggering an EAGAIN condition * when it is subsequently mapped and aborting the I/O. */ if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) { error = -EAGAIN; if (!imap_spans_range(&imap, offset_fsb, end_fsb)) goto out_unlock; } /* * For overwrite only I/O, we cannot convert unwritten extents without * requiring sub-block zeroing. This can only be done under an * exclusive IOLOCK, hence return -EAGAIN if this is not a written * extent to tell the caller to try again. */ if (flags & IOMAP_OVERWRITE_ONLY) { error = -EAGAIN; if (imap.br_state != XFS_EXT_NORM && ((offset | length) & mp->m_blockmask)) goto out_unlock; } seq = xfs_iomap_inode_sequence(ip, iomap_flags); xfs_iunlock(ip, lockmode); trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq); allocate_blocks: error = -EAGAIN; if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) goto out_unlock; /* * We cap the maximum length we map to a sane size to keep the chunks * of work done where somewhat symmetric with the work writeback does. * This is a completely arbitrary number pulled out of thin air as a * best guess for initial testing. * * Note that the values needs to be less than 32-bits wide until the * lower level functions are updated. 
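 * With 4 KiB pages the 1024 * PAGE_SIZE cap below works out to 4 MiB per mapping.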
*/ length = min_t(loff_t, length, 1024 * PAGE_SIZE); end_fsb = xfs_iomap_end_fsb(mp, offset, length); if (offset + length > XFS_ISIZE(ip)) end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb); else if (nimaps && imap.br_startblock == HOLESTARTBLOCK) end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount); xfs_iunlock(ip, lockmode); error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb, flags, &imap, &seq); if (error) return error; trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags | IOMAP_F_NEW, seq); out_found_cow: length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount); trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap); if (imap.br_startblock != HOLESTARTBLOCK) { seq = xfs_iomap_inode_sequence(ip, 0); error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq); if (error) goto out_unlock; } seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED); xfs_iunlock(ip, lockmode); return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq); out_unlock: if (lockmode) xfs_iunlock(ip, lockmode); return error; } const struct iomap_ops xfs_direct_write_iomap_ops = { .iomap_begin = xfs_direct_write_iomap_begin, }; #ifdef CONFIG_XFS_RT /* * This is really simple. The space has already been reserved before taking the * IOLOCK, the actual block allocation is done just before submitting the bio * and only recorded in the extent map on I/O completion. */ static int xfs_zoned_direct_write_iomap_begin( struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct xfs_inode *ip = XFS_I(inode); int error; ASSERT(!(flags & IOMAP_OVERWRITE_ONLY)); /* * Needs to be pushed down into the allocator so that only writes into * a single zone can be supported. */ if (flags & IOMAP_NOWAIT) return -EAGAIN; /* * Ensure the extent list is in memory in so that we don't have to do * read it from the I/O completion handler. */ if (xfs_need_iread_extents(&ip->i_df)) { xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) return error; } iomap->type = IOMAP_MAPPED; iomap->flags = IOMAP_F_DIRTY; iomap->bdev = ip->i_mount->m_rtdev_targp->bt_bdev; iomap->offset = offset; iomap->length = length; iomap->flags = IOMAP_F_ANON_WRITE; return 0; } const struct iomap_ops xfs_zoned_direct_write_iomap_ops = { .iomap_begin = xfs_zoned_direct_write_iomap_begin, }; #endif /* CONFIG_XFS_RT */ static int xfs_dax_write_iomap_end( struct inode *inode, loff_t pos, loff_t length, ssize_t written, unsigned flags, struct iomap *iomap) { struct xfs_inode *ip = XFS_I(inode); if (!xfs_is_cow_inode(ip)) return 0; if (!written) return xfs_reflink_cancel_cow_range(ip, pos, length, true); return xfs_reflink_end_cow(ip, pos, written); } const struct iomap_ops xfs_dax_write_iomap_ops = { .iomap_begin = xfs_direct_write_iomap_begin, .iomap_end = xfs_dax_write_iomap_end, }; /* * Convert a hole to a delayed allocation. 
*/ static void xfs_bmap_add_extent_hole_delay( struct xfs_inode *ip, /* incore inode pointer */ int whichfork, struct xfs_iext_cursor *icur, struct xfs_bmbt_irec *new) /* new data to add to file extents */ { struct xfs_ifork *ifp; /* inode fork pointer */ xfs_bmbt_irec_t left; /* left neighbor extent entry */ xfs_filblks_t newlen=0; /* new indirect size */ xfs_filblks_t oldlen=0; /* old indirect size */ xfs_bmbt_irec_t right; /* right neighbor extent entry */ uint32_t state = xfs_bmap_fork_to_state(whichfork); xfs_filblks_t temp; /* temp for indirect calculations */ ifp = xfs_ifork_ptr(ip, whichfork); ASSERT(isnullstartblock(new->br_startblock)); /* * Check and set flags if this segment has a left neighbor */ if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { state |= BMAP_LEFT_VALID; if (isnullstartblock(left.br_startblock)) state |= BMAP_LEFT_DELAY; } /* * Check and set flags if the current (right) segment exists. * If it doesn't exist, we're converting the hole at end-of-file. */ if (xfs_iext_get_extent(ifp, icur, &right)) { state |= BMAP_RIGHT_VALID; if (isnullstartblock(right.br_startblock)) state |= BMAP_RIGHT_DELAY; } /* * Set contiguity flags on the left and right neighbors. * Don't let extents get too large, even if the pieces are contiguous. */ if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) && left.br_startoff + left.br_blockcount == new->br_startoff && left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) state |= BMAP_LEFT_CONTIG; if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) && new->br_startoff + new->br_blockcount == right.br_startoff && new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && (!(state & BMAP_LEFT_CONTIG) || (left.br_blockcount + new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))) state |= BMAP_RIGHT_CONTIG; /* * Switch out based on the contiguity flags. */ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: /* * New allocation is contiguous with delayed allocations * on the left and on the right. * Merge all three into a single extent record. */ temp = left.br_blockcount + new->br_blockcount + right.br_blockcount; oldlen = startblockval(left.br_startblock) + startblockval(new->br_startblock) + startblockval(right.br_startblock); newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), oldlen); left.br_startblock = nullstartblock(newlen); left.br_blockcount = temp; xfs_iext_remove(ip, icur, state); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &left); break; case BMAP_LEFT_CONTIG: /* * New allocation is contiguous with a delayed allocation * on the left. * Merge the new allocation with the left neighbor. */ temp = left.br_blockcount + new->br_blockcount; oldlen = startblockval(left.br_startblock) + startblockval(new->br_startblock); newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), oldlen); left.br_blockcount = temp; left.br_startblock = nullstartblock(newlen); xfs_iext_prev(ifp, icur); xfs_iext_update_extent(ip, state, icur, &left); break; case BMAP_RIGHT_CONTIG: /* * New allocation is contiguous with a delayed allocation * on the right. * Merge the new allocation with the right neighbor. 
*/ temp = new->br_blockcount + right.br_blockcount; oldlen = startblockval(new->br_startblock) + startblockval(right.br_startblock); newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), oldlen); right.br_startoff = new->br_startoff; right.br_startblock = nullstartblock(newlen); right.br_blockcount = temp; xfs_iext_update_extent(ip, state, icur, &right); break; case 0: /* * New allocation is not contiguous with another * delayed allocation. * Insert a new entry. */ oldlen = newlen = 0; xfs_iext_insert(ip, icur, new, state); break; } if (oldlen != newlen) { ASSERT(oldlen > newlen); xfs_add_fdblocks(ip->i_mount, oldlen - newlen); /* * Nothing to do for disk quota accounting here. */ xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen); } } /* * Add a delayed allocation extent to an inode. Blocks are reserved from the * global pool and the extent inserted into the inode in-core extent tree. * * On entry, got refers to the first extent beyond the offset of the extent to * allocate or eof is specified if no such extent exists. On return, got refers * to the extent record that was inserted to the inode fork. * * Note that the allocated extent may have been merged with contiguous extents * during insertion into the inode fork. Thus, got does not reflect the current * state of the inode fork on return. If necessary, the caller can use lastx to * look up the updated record in the inode fork. */ static int xfs_bmapi_reserve_delalloc( struct xfs_inode *ip, int whichfork, xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc, struct xfs_bmbt_irec *got, struct xfs_iext_cursor *icur, int eof) { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); xfs_extlen_t alen; xfs_extlen_t indlen; uint64_t fdblocks; int error; xfs_fileoff_t aoff; bool use_cowextszhint = whichfork == XFS_COW_FORK && !prealloc; retry: /* * Cap the alloc length. Keep track of prealloc so we know whether to * tag the inode before we return. */ aoff = off; alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN); if (!eof) alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); if (prealloc && alen >= len) prealloc = alen - len; /* * If we're targetting the COW fork but aren't creating a speculative * posteof preallocation, try to expand the reservation to align with * the COW extent size hint if there's sufficient free space. * * Unlike the data fork, the CoW cancellation functions will free all * the reservations at inactivation, so we don't require that every * delalloc reservation have a dirty pagecache. */ if (use_cowextszhint) { struct xfs_bmbt_irec prev; xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip); if (!xfs_iext_peek_prev_extent(ifp, icur, &prev)) prev.br_startoff = NULLFILEOFF; error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof, 1, 0, &aoff, &alen); ASSERT(!error); } /* * Make a transaction-less quota reservation for delayed allocation * blocks. This number gets adjusted later. We return if we haven't * allocated blocks already inside this loop. */ error = xfs_quota_reserve_blkres(ip, alen); if (error) goto out; /* * Split changing sb for alen and indlen since they could be coming * from different places. 
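 * (indlen below is the worst-case count of indirect btree blocks for the reservation; for realtime inodes the data blocks come out of the realtime extent counter instead, so only indlen is subtracted from fdblocks in that case.)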
*/ indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); ASSERT(indlen > 0); fdblocks = indlen; if (XFS_IS_REALTIME_INODE(ip)) { ASSERT(!xfs_is_zoned_inode(ip)); error = xfs_dec_frextents(mp, xfs_blen_to_rtbxlen(mp, alen)); if (error) goto out_unreserve_quota; } else { fdblocks += alen; } error = xfs_dec_fdblocks(mp, fdblocks, false); if (error) goto out_unreserve_frextents; ip->i_delayed_blks += alen; xfs_mod_delalloc(ip, alen, indlen); got->br_startoff = aoff; got->br_startblock = nullstartblock(indlen); got->br_blockcount = alen; got->br_state = XFS_EXT_NORM; xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got); /* * Tag the inode if blocks were preallocated. Note that COW fork * preallocation can occur at the start or end of the extent, even when * prealloc == 0, so we must also check the aligned offset and length. */ if (whichfork == XFS_DATA_FORK && prealloc) xfs_inode_set_eofblocks_tag(ip); if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) xfs_inode_set_cowblocks_tag(ip); return 0; out_unreserve_frextents: if (XFS_IS_REALTIME_INODE(ip)) xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, alen)); out_unreserve_quota: if (XFS_IS_QUOTA_ON(mp)) xfs_quota_unreserve_blkres(ip, alen); out: if (error == -ENOSPC || error == -EDQUOT) { trace_xfs_delalloc_enospc(ip, off, len); if (prealloc || use_cowextszhint) { /* retry without any preallocation */ use_cowextszhint = false; prealloc = 0; goto retry; } } return error; } static int xfs_zoned_buffered_write_iomap_begin( struct inode *inode, loff_t offset, loff_t count, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); struct xfs_zone_alloc_ctx *ac = iter->private; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count); u16 iomap_flags = IOMAP_F_SHARED; unsigned int lockmode = XFS_ILOCK_EXCL; xfs_filblks_t count_fsb; xfs_extlen_t indlen; struct xfs_bmbt_irec got; struct xfs_iext_cursor icur; int error = 0; ASSERT(!xfs_get_extsz_hint(ip)); ASSERT(!(flags & IOMAP_UNSHARE)); ASSERT(ac); if (xfs_is_shutdown(mp)) return -EIO; error = xfs_qm_dqattach(ip); if (error) return error; error = xfs_ilock_for_iomap(ip, flags, &lockmode); if (error) return error; if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { xfs_bmap_mark_sick(ip, XFS_DATA_FORK); error = -EFSCORRUPTED; goto out_unlock; } XFS_STATS_INC(mp, xs_blk_mapw); error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); if (error) goto out_unlock; /* * For zeroing operations check if there is any data to zero first. * * For regular writes we always need to allocate new blocks, but need to * provide the source mapping when the range is unaligned to support * read-modify-write of the whole block in the page cache. * * In either case we need to limit the reported range to the boundaries * of the source map in the data fork. */ if (!IS_ALIGNED(offset, mp->m_sb.sb_blocksize) || !IS_ALIGNED(offset + count, mp->m_sb.sb_blocksize) || (flags & IOMAP_ZERO)) { struct xfs_bmbt_irec smap; struct xfs_iext_cursor scur; if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &scur, &smap)) smap.br_startoff = end_fsb; /* fake hole until EOF */ if (smap.br_startoff > offset_fsb) { /* * We never need to allocate blocks for zeroing a hole. 
*/ if (flags & IOMAP_ZERO) { xfs_hole_to_iomap(ip, iomap, offset_fsb, smap.br_startoff); goto out_unlock; } end_fsb = min(end_fsb, smap.br_startoff); } else { end_fsb = min(end_fsb, smap.br_startoff + smap.br_blockcount); xfs_trim_extent(&smap, offset_fsb, end_fsb - offset_fsb); error = xfs_bmbt_to_iomap(ip, srcmap, &smap, flags, 0, xfs_iomap_inode_sequence(ip, 0)); if (error) goto out_unlock; } } if (!ip->i_cowfp) xfs_ifork_init_cow(ip); if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got)) got.br_startoff = end_fsb; if (got.br_startoff <= offset_fsb) { trace_xfs_reflink_cow_found(ip, &got); goto done; } /* * Cap the maximum length to keep the chunks of work done here somewhat * symmetric with the work writeback does. */ end_fsb = min(end_fsb, got.br_startoff); count_fsb = min3(end_fsb - offset_fsb, XFS_MAX_BMBT_EXTLEN, XFS_B_TO_FSB(mp, 1024 * PAGE_SIZE)); /* * The block reservation is supposed to cover all blocks that the * operation could possible write, but there is a nasty corner case * where blocks could be stolen from underneath us: * * 1) while this thread iterates over a larger buffered write, * 2) another thread is causing a write fault that calls into * ->page_mkwrite in range this thread writes to, using up the * delalloc reservation created by a previous call to this function. * 3) another thread does direct I/O on the range that the write fault * happened on, which causes writeback of the dirty data. * 4) this then set the stale flag, which cuts the current iomap * iteration short, causing the new call to ->iomap_begin that gets * us here again, but now without a sufficient reservation. * * This is a very unusual I/O pattern, and nothing but generic/095 is * known to hit it. There's not really much we can do here, so turn this * into a short write. 
*/ if (count_fsb > ac->reserved_blocks) { xfs_warn_ratelimited(mp, "Short write on ino 0x%llx comm %.20s due to three-way race with write fault and direct I/O", ip->i_ino, current->comm); count_fsb = ac->reserved_blocks; if (!count_fsb) { error = -EIO; goto out_unlock; } } error = xfs_quota_reserve_blkres(ip, count_fsb); if (error) goto out_unlock; indlen = xfs_bmap_worst_indlen(ip, count_fsb); error = xfs_dec_fdblocks(mp, indlen, false); if (error) goto out_unlock; ip->i_delayed_blks += count_fsb; xfs_mod_delalloc(ip, count_fsb, indlen); got.br_startoff = offset_fsb; got.br_startblock = nullstartblock(indlen); got.br_blockcount = count_fsb; got.br_state = XFS_EXT_NORM; xfs_bmap_add_extent_hole_delay(ip, XFS_COW_FORK, &icur, &got); ac->reserved_blocks -= count_fsb; iomap_flags |= IOMAP_F_NEW; trace_xfs_iomap_alloc(ip, offset, XFS_FSB_TO_B(mp, count_fsb), XFS_COW_FORK, &got); done: error = xfs_bmbt_to_iomap(ip, iomap, &got, flags, iomap_flags, xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED)); out_unlock: xfs_iunlock(ip, lockmode); return error; } static int xfs_buffered_write_iomap_begin( struct inode *inode, loff_t offset, loff_t count, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count); struct xfs_bmbt_irec imap, cmap; struct xfs_iext_cursor icur, ccur; xfs_fsblock_t prealloc_blocks = 0; bool eof = false, cow_eof = false, shared = false; int allocfork = XFS_DATA_FORK; int error = 0; unsigned int lockmode = XFS_ILOCK_EXCL; unsigned int iomap_flags = 0; u64 seq; if (xfs_is_shutdown(mp)) return -EIO; if (xfs_is_zoned_inode(ip)) return xfs_zoned_buffered_write_iomap_begin(inode, offset, count, flags, iomap, srcmap); /* we can't use delayed allocations when using extent size hints */ if (xfs_get_extsz_hint(ip)) return xfs_direct_write_iomap_begin(inode, offset, count, flags, iomap, srcmap); error = xfs_qm_dqattach(ip); if (error) return error; error = xfs_ilock_for_iomap(ip, flags, &lockmode); if (error) return error; if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { xfs_bmap_mark_sick(ip, XFS_DATA_FORK); error = -EFSCORRUPTED; goto out_unlock; } XFS_STATS_INC(mp, xs_blk_mapw); error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); if (error) goto out_unlock; /* * Search the data fork first to look up our source mapping. We * always need the data fork map, as we have to return it to the * iomap code so that the higher level write code can read data in to * perform read-modify-write cycles for unaligned writes. */ eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap); if (eof) imap.br_startoff = end_fsb; /* fake hole until the end */ /* We never need to allocate blocks for zeroing or unsharing a hole. */ if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) && imap.br_startoff > offset_fsb) { xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff); goto out_unlock; } /* * For zeroing, trim a delalloc extent that extends beyond the EOF * block. If it starts beyond the EOF block, convert it to an * unwritten extent. 
*/ if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb && isnullstartblock(imap.br_startblock)) { xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)); if (offset_fsb >= eof_fsb) goto convert_delay; if (end_fsb > eof_fsb) { end_fsb = eof_fsb; xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); } } /* * Search the COW fork extent list even if we did not find a data fork * extent. This serves two purposes: first this implements the * speculative preallocation using cowextsize, so that we also unshare * block adjacent to shared blocks instead of just the shared blocks * themselves. Second the lookup in the extent list is generally faster * than going out to the shared extent tree. */ if (xfs_is_cow_inode(ip)) { if (!ip->i_cowfp) { ASSERT(!xfs_is_reflink_inode(ip)); xfs_ifork_init_cow(ip); } cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &ccur, &cmap); if (!cow_eof && cmap.br_startoff <= offset_fsb) { trace_xfs_reflink_cow_found(ip, &cmap); goto found_cow; } } if (imap.br_startoff <= offset_fsb) { /* * For reflink files we may need a delalloc reservation when * overwriting shared extents. This includes zeroing of * existing extents that contain data. */ if (!xfs_is_cow_inode(ip) || ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) { trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK, &imap); goto found_imap; } xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); /* Trim the mapping to the nearest shared extent boundary. */ error = xfs_bmap_trim_cow(ip, &imap, &shared); if (error) goto out_unlock; /* Not shared? Just report the (potentially capped) extent. */ if (!shared) { trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK, &imap); goto found_imap; } /* * Fork all the shared blocks from our write offset until the * end of the extent. */ allocfork = XFS_COW_FORK; end_fsb = imap.br_startoff + imap.br_blockcount; } else { /* * We cap the maximum length we map here to MAX_WRITEBACK_PAGES * pages to keep the chunks of work done where somewhat * symmetric with the work writeback does. This is a completely * arbitrary number pulled out of thin air. * * Note that the values needs to be less than 32-bits wide until * the lower level functions are updated. */ count = min_t(loff_t, count, 1024 * PAGE_SIZE); end_fsb = xfs_iomap_end_fsb(mp, offset, count); if (xfs_is_always_cow_inode(ip)) allocfork = XFS_COW_FORK; } if (eof && offset + count > XFS_ISIZE(ip)) { /* * Determine the initial size of the preallocation. * We clean up any extra preallocation when the file is closed. */ if (xfs_has_allocsize(mp)) prealloc_blocks = mp->m_allocsize_blocks; else if (allocfork == XFS_DATA_FORK) prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset, count, &icur); else prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset, count, &ccur); if (prealloc_blocks) { xfs_extlen_t align; xfs_off_t end_offset; xfs_fileoff_t p_end_fsb; end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1); p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) + prealloc_blocks; align = xfs_eof_alignment(ip); if (align) p_end_fsb = roundup_64(p_end_fsb, align); p_end_fsb = min(p_end_fsb, XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); ASSERT(p_end_fsb > offset_fsb); prealloc_blocks = p_end_fsb - end_fsb; } } /* * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch * them out if the write happens to fail. 
*/ iomap_flags |= IOMAP_F_NEW; if (allocfork == XFS_COW_FORK) { error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb, end_fsb - offset_fsb, prealloc_blocks, &cmap, &ccur, cow_eof); if (error) goto out_unlock; trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap); goto found_cow; } error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb, end_fsb - offset_fsb, prealloc_blocks, &imap, &icur, eof); if (error) goto out_unlock; trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap); found_imap: seq = xfs_iomap_inode_sequence(ip, iomap_flags); xfs_iunlock(ip, lockmode); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq); convert_delay: xfs_iunlock(ip, lockmode); truncate_pagecache(inode, offset); error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset, iomap, NULL); if (error) return error; trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap); return 0; found_cow: if (imap.br_startoff <= offset_fsb) { error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, xfs_iomap_inode_sequence(ip, 0)); if (error) goto out_unlock; } else { xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb); } iomap_flags |= IOMAP_F_SHARED; seq = xfs_iomap_inode_sequence(ip, iomap_flags); xfs_iunlock(ip, lockmode); return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, iomap_flags, seq); out_unlock: xfs_iunlock(ip, lockmode); return error; } static void xfs_buffered_write_delalloc_punch( struct inode *inode, loff_t offset, loff_t length, struct iomap *iomap) { struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); xfs_bmap_punch_delalloc_range(XFS_I(inode), (iomap->flags & IOMAP_F_SHARED) ? XFS_COW_FORK : XFS_DATA_FORK, offset, offset + length, iter->private); } static int xfs_buffered_write_iomap_end( struct inode *inode, loff_t offset, loff_t length, ssize_t written, unsigned flags, struct iomap *iomap) { loff_t start_byte, end_byte; /* If we didn't reserve the blocks, we're not allowed to punch them. */ if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW)) return 0; /* * iomap_page_mkwrite() will never fail in a way that requires delalloc * extents that it allocated to be revoked. Hence never try to release * them here. */ if (flags & IOMAP_FAULT) return 0; /* Nothing to do if we've written the entire delalloc extent */ start_byte = iomap_last_written_block(inode, offset, written); end_byte = round_up(offset + length, i_blocksize(inode)); if (start_byte >= end_byte) return 0; /* For zeroing operations the callers already hold invalidate_lock. 
*/ if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) { rwsem_assert_held_write(&inode->i_mapping->invalidate_lock); iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap, xfs_buffered_write_delalloc_punch); } else { filemap_invalidate_lock(inode->i_mapping); iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap, xfs_buffered_write_delalloc_punch); filemap_invalidate_unlock(inode->i_mapping); } return 0; } const struct iomap_ops xfs_buffered_write_iomap_ops = { .iomap_begin = xfs_buffered_write_iomap_begin, .iomap_end = xfs_buffered_write_iomap_end, }; static int xfs_read_iomap_begin( struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_bmbt_irec imap; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length); int nimaps = 1, error = 0; bool shared = false; unsigned int lockmode = XFS_ILOCK_SHARED; u64 seq; ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO))); if (xfs_is_shutdown(mp)) return -EIO; error = xfs_ilock_for_iomap(ip, flags, &lockmode); if (error) return error; error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, &nimaps, 0); if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode))) error = xfs_reflink_trim_around_shared(ip, &imap, &shared); seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0); xfs_iunlock(ip, lockmode); if (error) return error; trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, shared ? IOMAP_F_SHARED : 0, seq); } const struct iomap_ops xfs_read_iomap_ops = { .iomap_begin = xfs_read_iomap_begin, }; static int xfs_seek_iomap_begin( struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); xfs_fileoff_t cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF; struct xfs_iext_cursor icur; struct xfs_bmbt_irec imap, cmap; int error = 0; unsigned lockmode; u64 seq; if (xfs_is_shutdown(mp)) return -EIO; lockmode = xfs_ilock_data_map_shared(ip); error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); if (error) goto out_unlock; if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) { /* * If we found a data extent we are done. */ if (imap.br_startoff <= offset_fsb) goto done; data_fsb = imap.br_startoff; } else { /* * Fake a hole until the end of the file. */ data_fsb = xfs_iomap_end_fsb(mp, offset, length); } /* * If a COW fork extent covers the hole, report it - capped to the next * data fork extent: */ if (xfs_inode_has_cow_data(ip) && xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) cow_fsb = cmap.br_startoff; if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) { if (data_fsb < cow_fsb + cmap.br_blockcount) end_fsb = min(end_fsb, data_fsb); xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb); seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED); error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq); /* * This is a COW extent, so we must probe the page cache * because there could be dirty page cache being backed * by this extent. */ iomap->type = IOMAP_UNWRITTEN; goto out_unlock; } /* * Else report a hole, capped to the next found data or COW extent. 
*/ if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb) imap.br_blockcount = cow_fsb - offset_fsb; else imap.br_blockcount = data_fsb - offset_fsb; imap.br_startoff = offset_fsb; imap.br_startblock = HOLESTARTBLOCK; imap.br_state = XFS_EXT_NORM; done: seq = xfs_iomap_inode_sequence(ip, 0); xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq); out_unlock: xfs_iunlock(ip, lockmode); return error; } const struct iomap_ops xfs_seek_iomap_ops = { .iomap_begin = xfs_seek_iomap_begin, }; static int xfs_xattr_iomap_begin( struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); struct xfs_bmbt_irec imap; int nimaps = 1, error = 0; unsigned lockmode; int seq; if (xfs_is_shutdown(mp)) return -EIO; lockmode = xfs_ilock_attr_map_shared(ip); /* if there are no attribute fork or extents, return ENOENT */ if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) { error = -ENOENT; goto out_unlock; } ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL); error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, &nimaps, XFS_BMAPI_ATTRFORK); out_unlock: seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR); xfs_iunlock(ip, lockmode); if (error) return error; ASSERT(nimaps); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq); } const struct iomap_ops xfs_xattr_iomap_ops = { .iomap_begin = xfs_xattr_iomap_begin, }; int xfs_zero_range( struct xfs_inode *ip, loff_t pos, loff_t len, struct xfs_zone_alloc_ctx *ac, bool *did_zero) { struct inode *inode = VFS_I(ip); xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); if (IS_DAX(inode)) return dax_zero_range(inode, pos, len, did_zero, &xfs_dax_write_iomap_ops); return iomap_zero_range(inode, pos, len, did_zero, &xfs_buffered_write_iomap_ops, ac); } int xfs_truncate_page( struct xfs_inode *ip, loff_t pos, struct xfs_zone_alloc_ctx *ac, bool *did_zero) { struct inode *inode = VFS_I(ip); if (IS_DAX(inode)) return dax_truncate_page(inode, pos, did_zero, &xfs_dax_write_iomap_ops); return iomap_truncate_page(inode, pos, did_zero, &xfs_buffered_write_iomap_ops, ac); } |
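/*
 * Illustrative sketch, not part of the original file: one way the
 * xfs_seek_iomap_ops table defined above is typically consumed.  The generic
 * iomap_seek_data()/iomap_seek_hole() helpers drive ->iomap_begin for each
 * mapping and interpret holes and unwritten extents; those helpers and the
 * caller name xfs_example_llseek() are assumptions here, not something this
 * file defines.
 */
static loff_t xfs_example_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode	*inode = file->f_mapping->host;

	switch (whence) {
	case SEEK_HOLE:
		/* reports holes, honouring dirty COW fork extents via the
		 * IOMAP_UNWRITTEN special-casing in xfs_seek_iomap_begin() */
		return iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
	case SEEK_DATA:
		return iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
	default:
		return generic_file_llseek(file, offset, whence);
	}
}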
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_DISK_GROUPS_FORMAT_H
#define _BCACHEFS_DISK_GROUPS_FORMAT_H

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __packed __aligned(8);

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0, 1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1, 6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[];
} __packed __aligned(8);

#endif /* _BCACHEFS_DISK_GROUPS_FORMAT_H */
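/*
 * Illustrative sketch, not part of the header above: LE64_BITMASK() is
 * assumed to expand to little-endian safe accessors -- a getter named after
 * the mask and a SET_ prefixed setter -- so callers never open-code the bit
 * ranges (bit 0: deleted, bits 1-5: data types allowed, bits 6-23: parent).
 * The helper below and the generated accessor names are assumptions.
 */
static inline void example_mark_group_deleted(struct bch_disk_group *g)
{
	/* assumed accessors generated by the LE64_BITMASK() lines above */
	SET_BCH_GROUP_DELETED(g, 1);
	SET_BCH_GROUP_PARENT(g, 0);
}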
72 72 71 91 91 72 91 5 9 6 8 91 91 91 91 91 86 91 49 89 49 49 49 91 90 91 72 91 91 74 74 43 72 72 86 21 86 86 81 77 77 89 89 89 89 89 91 91 89 91 91 91 90 90 90 90 90 90 90 90 90 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 91 89 91 90 91 90 91 91 89 88 90 90 90 90 85 90 82 82 90 90 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 | // SPDX-License-Identifier: GPL-2.0-or-later /* * 842 Software Compression * * Copyright (C) 2015 Dan Streetman, IBM Corp * * See 842.h for details of the 842 compressed format. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "842_compress" #include <linux/hashtable.h> #include "842.h" #include "842_debugfs.h" #define SW842_HASHTABLE8_BITS (10) #define SW842_HASHTABLE4_BITS (11) #define SW842_HASHTABLE2_BITS (10) /* By default, we allow compressing input buffers of any length, but we must * use the non-standard "short data" template so the decompressor can correctly * reproduce the uncompressed data buffer at the right length. 
However the * hardware 842 compressor will not recognize the "short data" template, and * will fail to decompress any compressed buffer containing it (I have no idea * why anyone would want to use software to compress and hardware to decompress * but that's beside the point). This parameter forces the compression * function to simply reject any input buffer that isn't a multiple of 8 bytes * long, instead of using the "short data" template, so that all compressed * buffers produced by this function will be decompressable by the 842 hardware * decompressor. Unless you have a specific need for that, leave this disabled * so that any length buffer can be compressed. */ static bool sw842_strict; module_param_named(strict, sw842_strict, bool, 0644); static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */ { I8, N0, N0, N0, 0x19 }, /* 8 */ { I4, I4, N0, N0, 0x18 }, /* 18 */ { I4, I2, I2, N0, 0x17 }, /* 25 */ { I2, I2, I4, N0, 0x13 }, /* 25 */ { I2, I2, I2, I2, 0x12 }, /* 32 */ { I4, I2, D2, N0, 0x16 }, /* 33 */ { I4, D2, I2, N0, 0x15 }, /* 33 */ { I2, D2, I4, N0, 0x0e }, /* 33 */ { D2, I2, I4, N0, 0x09 }, /* 33 */ { I2, I2, I2, D2, 0x11 }, /* 40 */ { I2, I2, D2, I2, 0x10 }, /* 40 */ { I2, D2, I2, I2, 0x0d }, /* 40 */ { D2, I2, I2, I2, 0x08 }, /* 40 */ { I4, D4, N0, N0, 0x14 }, /* 41 */ { D4, I4, N0, N0, 0x04 }, /* 41 */ { I2, I2, D4, N0, 0x0f }, /* 48 */ { I2, D2, I2, D2, 0x0c }, /* 48 */ { I2, D4, I2, N0, 0x0b }, /* 48 */ { D2, I2, I2, D2, 0x07 }, /* 48 */ { D2, I2, D2, I2, 0x06 }, /* 48 */ { D4, I2, I2, N0, 0x03 }, /* 48 */ { I2, D2, D4, N0, 0x0a }, /* 56 */ { D2, I2, D4, N0, 0x05 }, /* 56 */ { D4, I2, D2, N0, 0x02 }, /* 56 */ { D4, D2, I2, N0, 0x01 }, /* 56 */ { D8, N0, N0, N0, 0x00 }, /* 64 */ }; struct sw842_hlist_node8 { struct hlist_node node; u64 data; u8 index; }; struct sw842_hlist_node4 { struct hlist_node node; u32 data; u16 index; }; struct sw842_hlist_node2 { struct hlist_node node; u16 data; u8 index; }; #define INDEX_NOT_FOUND (-1) #define INDEX_NOT_CHECKED (-2) struct sw842_param { u8 *in; u8 *instart; u64 ilen; u8 *out; u64 olen; u8 bit; u64 data8[1]; u32 data4[2]; u16 data2[4]; int index8[1]; int index4[2]; int index2[4]; DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS); DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS); DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS); struct sw842_hlist_node8 node8[1 << I8_BITS]; struct sw842_hlist_node4 node4[1 << I4_BITS]; struct sw842_hlist_node2 node2[1 << I2_BITS]; }; #define get_input_data(p, o, b) \ be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o)))) #define init_hashtable_nodes(p, b) do { \ int _i; \ hash_init((p)->htable##b); \ for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \ (p)->node##b[_i].index = _i; \ (p)->node##b[_i].data = 0; \ INIT_HLIST_NODE(&(p)->node##b[_i].node); \ } \ } while (0) #define find_index(p, b, n) ({ \ struct sw842_hlist_node##b *_n; \ p->index##b[n] = INDEX_NOT_FOUND; \ hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ if (p->data##b[n] == _n->data) { \ p->index##b[n] = _n->index; \ break; \ } \ } \ p->index##b[n] >= 0; \ }) #define check_index(p, b, n) \ ((p)->index##b[n] == INDEX_NOT_CHECKED \ ? 
find_index(p, b, n) \ : (p)->index##b[n] >= 0) #define replace_hash(p, b, i, d) do { \ struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \ hash_del(&_n->node); \ _n->data = (p)->data##b[d]; \ pr_debug("add hash index%x %x pos %x data %lx\n", b, \ (unsigned int)_n->index, \ (unsigned int)((p)->in - (p)->instart), \ (unsigned long)_n->data); \ hash_add((p)->htable##b, &_n->node, _n->data); \ } while (0) static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; static int add_bits(struct sw842_param *p, u64 d, u8 n); static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) { int ret; if (n <= s) return -EINVAL; ret = add_bits(p, d >> s, n - s); if (ret) return ret; return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); } static int add_bits(struct sw842_param *p, u64 d, u8 n) { int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; u64 o; u8 *out = p->out; pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); if (n > 64) return -EINVAL; /* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0), * or if we're at the end of the output buffer and would write past end */ if (bits > 64) return __split_add_bits(p, d, n, 32); else if (p->olen < 8 && bits > 32 && bits <= 56) return __split_add_bits(p, d, n, 16); else if (p->olen < 4 && bits > 16 && bits <= 24) return __split_add_bits(p, d, n, 8); if (DIV_ROUND_UP(bits, 8) > p->olen) return -ENOSPC; o = *out & bmask[b]; d <<= s; if (bits <= 8) *out = o | d; else if (bits <= 16) put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out); else if (bits <= 24) put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out); else if (bits <= 32) put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out); else if (bits <= 40) put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out); else if (bits <= 48) put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out); else if (bits <= 56) put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out); else put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out); p->bit += n; if (p->bit > 7) { p->out += p->bit / 8; p->olen -= p->bit / 8; p->bit %= 8; } return 0; } static int add_template(struct sw842_param *p, u8 c) { int ret, i, b = 0; u8 *t = comp_ops[c]; bool inv = false; if (c >= OPS_MAX) return -EINVAL; pr_debug("template %x\n", t[4]); ret = add_bits(p, t[4], OP_BITS); if (ret) return ret; for (i = 0; i < 4; i++) { pr_debug("op %x\n", t[i]); switch (t[i] & OP_AMOUNT) { case OP_AMOUNT_8: if (b) inv = true; else if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index8[0], I8_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data8[0], 64); else inv = true; break; case OP_AMOUNT_4: if (b == 2 && t[i] & OP_ACTION_DATA) ret = add_bits(p, get_input_data(p, 2, 32), 32); else if (b != 0 && b != 4) inv = true; else if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index4[b >> 2], I4_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data4[b >> 2], 32); else inv = true; break; case OP_AMOUNT_2: if (b != 0 && b != 2 && b != 4 && b != 6) inv = true; if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index2[b >> 1], I2_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data2[b >> 1], 16); else inv = true; break; case OP_AMOUNT_0: inv = (b != 8) || !(t[i] & OP_ACTION_NOOP); break; default: inv = true; break; } if (ret) return ret; if (inv) { pr_err("Invalid templ %x op %d : %x %x %x %x\n", c, i, t[0], t[1], t[2], t[3]); return -EINVAL; } b += t[i] & OP_AMOUNT; } if (b != 8) { pr_err("Invalid template %x len %x : %x %x %x %x\n", c, b, t[0], t[1], t[2], 
t[3]); return -EINVAL; } if (sw842_template_counts) atomic_inc(&template_count[t[4]]); return 0; } static int add_repeat_template(struct sw842_param *p, u8 r) { int ret; /* repeat param is 0-based */ if (!r || --r > REPEAT_BITS_MAX) return -EINVAL; ret = add_bits(p, OP_REPEAT, OP_BITS); if (ret) return ret; ret = add_bits(p, r, REPEAT_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_repeat_count); return 0; } static int add_short_data_template(struct sw842_param *p, u8 b) { int ret, i; if (!b || b > SHORT_DATA_BITS_MAX) return -EINVAL; ret = add_bits(p, OP_SHORT_DATA, OP_BITS); if (ret) return ret; ret = add_bits(p, b, SHORT_DATA_BITS); if (ret) return ret; for (i = 0; i < b; i++) { ret = add_bits(p, p->in[i], 8); if (ret) return ret; } if (sw842_template_counts) atomic_inc(&template_short_data_count); return 0; } static int add_zeros_template(struct sw842_param *p) { int ret = add_bits(p, OP_ZEROS, OP_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_zeros_count); return 0; } static int add_end_template(struct sw842_param *p) { int ret = add_bits(p, OP_END, OP_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_end_count); return 0; } static bool check_template(struct sw842_param *p, u8 c) { u8 *t = comp_ops[c]; int i, match, b = 0; if (c >= OPS_MAX) return false; for (i = 0; i < 4; i++) { if (t[i] & OP_ACTION_INDEX) { if (t[i] & OP_AMOUNT_2) match = check_index(p, 2, b >> 1); else if (t[i] & OP_AMOUNT_4) match = check_index(p, 4, b >> 2); else if (t[i] & OP_AMOUNT_8) match = check_index(p, 8, 0); else return false; if (!match) return false; } b += t[i] & OP_AMOUNT; } return true; } static void get_next_data(struct sw842_param *p) { p->data8[0] = get_input_data(p, 0, 64); p->data4[0] = get_input_data(p, 0, 32); p->data4[1] = get_input_data(p, 4, 32); p->data2[0] = get_input_data(p, 0, 16); p->data2[1] = get_input_data(p, 2, 16); p->data2[2] = get_input_data(p, 4, 16); p->data2[3] = get_input_data(p, 6, 16); } /* update the hashtable entries. * only call this after finding/adding the current template * the dataN fields for the current 8 byte block must be already updated */ static void update_hashtables(struct sw842_param *p) { u64 pos = p->in - p->instart; u64 n8 = (pos >> 3) % (1 << I8_BITS); u64 n4 = (pos >> 2) % (1 << I4_BITS); u64 n2 = (pos >> 1) % (1 << I2_BITS); replace_hash(p, 8, n8, 0); replace_hash(p, 4, n4, 0); replace_hash(p, 4, n4, 1); replace_hash(p, 2, n2, 0); replace_hash(p, 2, n2, 1); replace_hash(p, 2, n2, 2); replace_hash(p, 2, n2, 3); } /* find the next template to use, and add it * the p->dataN fields must already be set for the current 8 byte block */ static int process_next(struct sw842_param *p) { int ret, i; p->index8[0] = INDEX_NOT_CHECKED; p->index4[0] = INDEX_NOT_CHECKED; p->index4[1] = INDEX_NOT_CHECKED; p->index2[0] = INDEX_NOT_CHECKED; p->index2[1] = INDEX_NOT_CHECKED; p->index2[2] = INDEX_NOT_CHECKED; p->index2[3] = INDEX_NOT_CHECKED; /* check up to OPS_MAX - 1; last op is our fallback */ for (i = 0; i < OPS_MAX - 1; i++) { if (check_template(p, i)) break; } ret = add_template(p, i); if (ret) return ret; return 0; } /** * sw842_compress * * Compress the uncompressed buffer of length @ilen at @in to the output buffer * @out, using no more than @olen bytes, using the 842 compression format. * * Returns: 0 on success, error on failure. The @olen parameter * will contain the number of output bytes written on success, or * 0 on error. 
*/ int sw842_compress(const u8 *in, unsigned int ilen, u8 *out, unsigned int *olen, void *wmem) { struct sw842_param *p = (struct sw842_param *)wmem; int ret; u64 last, next, pad, total; u8 repeat_count = 0; u32 crc; BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS); init_hashtable_nodes(p, 8); init_hashtable_nodes(p, 4); init_hashtable_nodes(p, 2); p->in = (u8 *)in; p->instart = p->in; p->ilen = ilen; p->out = out; p->olen = *olen; p->bit = 0; total = p->olen; *olen = 0; /* if using strict mode, we can only compress a multiple of 8 */ if (sw842_strict && (ilen % 8)) { pr_err("Using strict mode, can't compress len %d\n", ilen); return -EINVAL; } /* let's compress at least 8 bytes, mkay? */ if (unlikely(ilen < 8)) goto skip_comp; /* make initial 'last' different so we don't match the first time */ last = ~get_unaligned((u64 *)p->in); while (p->ilen > 7) { next = get_unaligned((u64 *)p->in); /* must get the next data, as we need to update the hashtable * entries with the new data every time */ get_next_data(p); /* we don't care about endianness in last or next; * we're just comparing 8 bytes to another 8 bytes, * they're both the same endianness */ if (next == last) { /* repeat count bits are 0-based, so we stop at +1 */ if (++repeat_count <= REPEAT_BITS_MAX) goto repeat; } if (repeat_count) { ret = add_repeat_template(p, repeat_count); if (ret) return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; } if (next == 0) ret = add_zeros_template(p); else ret = process_next(p); if (ret) return ret; repeat: last = next; update_hashtables(p); p->in += 8; p->ilen -= 8; } if (repeat_count) { ret = add_repeat_template(p, repeat_count); if (ret) return ret; } skip_comp: if (p->ilen > 0) { ret = add_short_data_template(p, p->ilen); if (ret) return ret; p->in += p->ilen; p->ilen = 0; } ret = add_end_template(p); if (ret) return ret; /* * crc(0:31) is appended to target data starting with the next * bit after End of stream template. * nx842 calculates CRC for data in big-endian format. So doing * same here so that sw842 decompression can be used for both * compressed data. */ crc = crc32_be(0, in, ilen); ret = add_bits(p, crc, CRC_BITS); if (ret) return ret; if (p->bit) { p->out++; p->olen--; p->bit = 0; } /* pad compressed length to multiple of 8 */ pad = (8 - ((total - p->olen) % 8)) % 8; if (pad) { if (pad > p->olen) /* we were so close! */ return -ENOSPC; memset(p->out, 0, pad); p->out += pad; p->olen -= pad; } if (unlikely((total - p->olen) > UINT_MAX)) return -ENOSPC; *olen = total - p->olen; return 0; } EXPORT_SYMBOL_GPL(sw842_compress); static int __init sw842_init(void) { if (sw842_template_counts) sw842_debugfs_create(); return 0; } module_init(sw842_init); static void __exit sw842_exit(void) { if (sw842_template_counts) sw842_debugfs_remove(); } module_exit(sw842_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Software 842 Compressor"); MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); |
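/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * sw842_compress().  The scratch area must be SW842_MEM_COMPRESS bytes, the
 * same constant checked by the BUILD_BUG_ON() above; *dlen carries the output
 * buffer size in and the compressed length out.  The wrapper name and its
 * error handling are hypothetical.
 */
static int example_sw842_compress(const u8 *src, unsigned int slen,
				  u8 *dst, unsigned int *dlen)
{
	void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	int ret;

	if (!wmem)
		return -ENOMEM;
	/* with the "strict" module parameter set, slen must be a multiple of 8 */
	ret = sw842_compress(src, slen, dst, dlen, wmem);
	kfree(wmem);
	return ret;
}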
11 11 11 10 8 10 13 10 20 20 20 20 13 13 13 20 20 21 21 3 3 3 1 2 2 2 16 16 16 16 2 16 16 16 6 6 6 6 2 3 16 13 13 4 13 13 13 13 17 17 17 17 22 17 22 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 | // SPDX-License-Identifier: GPL-2.0-only /* * net/dccp/ackvec.c * * An implementation of Ack Vectors for the DCCP protocol * Copyright (c) 2007 University of Aberdeen, Scotland, UK * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> */ #include "dccp.h" #include <linux/kernel.h> #include <linux/slab.h> #include <linux/export.h> static struct kmem_cache *dccp_ackvec_slab; static struct kmem_cache *dccp_ackvec_record_slab; struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) { struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority); if (av != NULL) { av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1; INIT_LIST_HEAD(&av->av_records); } return av; } static void dccp_ackvec_purge_records(struct dccp_ackvec *av) { struct dccp_ackvec_record *cur, *next; list_for_each_entry_safe(cur, next, &av->av_records, avr_node) kmem_cache_free(dccp_ackvec_record_slab, cur); INIT_LIST_HEAD(&av->av_records); } void dccp_ackvec_free(struct dccp_ackvec *av) { if (likely(av != NULL)) { dccp_ackvec_purge_records(av); kmem_cache_free(dccp_ackvec_slab, av); } } /** * dccp_ackvec_update_records - Record information about sent Ack Vectors * @av: Ack Vector records to update * @seqno: Sequence number of the packet carrying the Ack Vector just sent * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector */ int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum) { struct dccp_ackvec_record *avr; avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC); if (avr == NULL) return -ENOBUFS; avr->avr_ack_seqno = seqno; avr->avr_ack_ptr = av->av_buf_head; avr->avr_ack_ackno = av->av_buf_ackno; avr->avr_ack_nonce = nonce_sum; avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head); /* * When the buffer overflows, we keep no more than one record. 
This is * the simplest way of disambiguating sender-Acks dating from before the * overflow from sender-Acks which refer to after the overflow; a simple * solution is preferable here since we are handling an exception. */ if (av->av_overflow) dccp_ackvec_purge_records(av); /* * Since GSS is incremented for each packet, the list is automatically * arranged in descending order of @ack_seqno. */ list_add(&avr->avr_node, &av->av_records); dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n", (unsigned long long)avr->avr_ack_seqno, (unsigned long long)avr->avr_ack_ackno, avr->avr_ack_runlen); return 0; } static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list, const u64 ackno) { struct dccp_ackvec_record *avr; /* * Exploit that records are inserted in descending order of sequence * number, start with the oldest record first. If @ackno is `before' * the earliest ack_ackno, the packet is too old to be considered. */ list_for_each_entry_reverse(avr, av_list, avr_node) { if (avr->avr_ack_seqno == ackno) return avr; if (before48(ackno, avr->avr_ack_seqno)) break; } return NULL; } /* * Buffer index and length computation using modulo-buffersize arithmetic. * Note that, as pointers move from right to left, head is `before' tail. */ static inline u16 __ackvec_idx_add(const u16 a, const u16 b) { return (a + b) % DCCPAV_MAX_ACKVEC_LEN; } static inline u16 __ackvec_idx_sub(const u16 a, const u16 b) { return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b); } u16 dccp_ackvec_buflen(const struct dccp_ackvec *av) { if (unlikely(av->av_overflow)) return DCCPAV_MAX_ACKVEC_LEN; return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head); } /** * dccp_ackvec_update_old - Update previous state as per RFC 4340, 11.4.1 * @av: non-empty buffer to update * @distance: negative or zero distance of @seqno from buf_ackno downward * @seqno: the (old) sequence number whose record is to be updated * @state: state in which packet carrying @seqno was received */ static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance, u64 seqno, enum dccp_ackvec_states state) { u16 ptr = av->av_buf_head; BUG_ON(distance > 0); if (unlikely(dccp_ackvec_is_empty(av))) return; do { u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr); if (distance + runlen >= 0) { /* * Only update the state if packet has not been received * yet. This is OK as per the second table in RFC 4340, * 11.4.1; i.e. here we are using the following table: * RECEIVED * 0 1 3 * S +---+---+---+ * T 0 | 0 | 0 | 0 | * O +---+---+---+ * R 1 | 1 | 1 | 1 | * E +---+---+---+ * D 3 | 0 | 1 | 3 | * +---+---+---+ * The "Not Received" state was set by reserve_seats(). */ if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED) av->av_buf[ptr] = state; else dccp_pr_debug("Not changing %llu state to %u\n", (unsigned long long)seqno, state); break; } distance += runlen + 1; ptr = __ackvec_idx_add(ptr, 1); } while (ptr != av->av_buf_tail); } /* Mark @num entries after buf_head as "Not yet received". 
*/ static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num) { u16 start = __ackvec_idx_add(av->av_buf_head, 1), len = DCCPAV_MAX_ACKVEC_LEN - start; /* check for buffer wrap-around */ if (num > len) { memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len); start = 0; num -= len; } if (num) memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num); } /** * dccp_ackvec_add_new - Record one or more new entries in Ack Vector buffer * @av: container of buffer to update (can be empty or non-empty) * @num_packets: number of packets to register (must be >= 1) * @seqno: sequence number of the first packet in @num_packets * @state: state in which packet carrying @seqno was received */ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets, u64 seqno, enum dccp_ackvec_states state) { u32 num_cells = num_packets; if (num_packets > DCCPAV_BURST_THRESH) { u32 lost_packets = num_packets - 1; DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets); /* * We received 1 packet and have a loss of size "num_packets-1" * which we squeeze into num_cells-1 rather than reserving an * entire byte for each lost packet. * The reason is that the vector grows in O(burst_length); when * it grows too large there will no room left for the payload. * This is a trade-off: if a few packets out of the burst show * up later, their state will not be changed; it is simply too * costly to reshuffle/reallocate/copy the buffer each time. * Should such problems persist, we will need to switch to a * different underlying data structure. */ for (num_packets = num_cells = 1; lost_packets; ++num_cells) { u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN); av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1); av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len; lost_packets -= len; } } if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) { DCCP_CRIT("Ack Vector buffer overflow: dropping old entries"); av->av_overflow = true; } av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets); if (av->av_overflow) av->av_buf_tail = av->av_buf_head; av->av_buf[av->av_buf_head] = state; av->av_buf_ackno = seqno; if (num_packets > 1) dccp_ackvec_reserve_seats(av, num_packets - 1); } /** * dccp_ackvec_input - Register incoming packet in the buffer * @av: Ack Vector to register packet to * @skb: Packet to register */ void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb) { u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq; enum dccp_ackvec_states state = DCCPAV_RECEIVED; if (dccp_ackvec_is_empty(av)) { dccp_ackvec_add_new(av, 1, seqno, state); av->av_tail_ackno = seqno; } else { s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno); u8 *current_head = av->av_buf + av->av_buf_head; if (num_packets == 1 && dccp_ackvec_state(current_head) == state && dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) { *current_head += 1; av->av_buf_ackno = seqno; } else if (num_packets > 0) { dccp_ackvec_add_new(av, num_packets, seqno, state); } else { dccp_ackvec_update_old(av, num_packets, seqno, state); } } } /** * dccp_ackvec_clear_state - Perform house-keeping / garbage-collection * @av: Ack Vector record to clean * @ackno: last Ack Vector which has been acknowledged * * This routine is called when the peer acknowledges the receipt of Ack Vectors * up to and including @ackno. While based on section A.3 of RFC 4340, here * are additional precautions to prevent corrupted buffer state. 
In particular, * we use tail_ackno to identify outdated records; it always marks the earliest * packet of group (2) in 11.4.2. */ void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno) { struct dccp_ackvec_record *avr, *next; u8 runlen_now, eff_runlen; s64 delta; avr = dccp_ackvec_lookup(&av->av_records, ackno); if (avr == NULL) return; /* * Deal with outdated acknowledgments: this arises when e.g. there are * several old records and the acks from the peer come in slowly. In * that case we may still have records that pre-date tail_ackno. */ delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno); if (delta < 0) goto free_records; /* * Deal with overlapping Ack Vectors: don't subtract more than the * number of packets between tail_ackno and ack_ackno. */ eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen; runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr); /* * The run length of Ack Vector cells does not decrease over time. If * the run length is the same as at the time the Ack Vector was sent, we * free the ack_ptr cell. That cell can however not be freed if the run * length has increased: in this case we need to move the tail pointer * backwards (towards higher indices), to its next-oldest neighbour. */ if (runlen_now > eff_runlen) { av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1; av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1); /* This move may not have cleared the overflow flag. */ if (av->av_overflow) av->av_overflow = (av->av_buf_head == av->av_buf_tail); } else { av->av_buf_tail = avr->avr_ack_ptr; /* * We have made sure that avr points to a valid cell within the * buffer. This cell is either older than head, or equals head * (empty buffer): in both cases we no longer have any overflow. */ av->av_overflow = 0; } /* * The peer has acknowledged up to and including ack_ackno. Hence the * first packet in group (2) of 11.4.2 is the successor of ack_ackno. */ av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1); free_records: list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) { list_del(&avr->avr_node); kmem_cache_free(dccp_ackvec_record_slab, avr); } } /* * Routines to keep track of Ack Vectors received in an skb */ int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce) { struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC); if (new == NULL) return -ENOBUFS; new->vec = vec; new->len = len; new->nonce = nonce; list_add_tail(&new->node, head); return 0; } EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add); void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks) { struct dccp_ackvec_parsed *cur, *next; list_for_each_entry_safe(cur, next, parsed_chunks, node) kfree(cur); INIT_LIST_HEAD(parsed_chunks); } EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup); int __init dccp_ackvec_init(void) { dccp_ackvec_slab = KMEM_CACHE(dccp_ackvec, SLAB_HWCACHE_ALIGN); if (dccp_ackvec_slab == NULL) goto out_err; dccp_ackvec_record_slab = KMEM_CACHE(dccp_ackvec_record, SLAB_HWCACHE_ALIGN); if (dccp_ackvec_record_slab == NULL) goto out_destroy_slab; return 0; out_destroy_slab: kmem_cache_destroy(dccp_ackvec_slab); dccp_ackvec_slab = NULL; out_err: DCCP_CRIT("Unable to create Ack Vector slab cache"); return -ENOBUFS; } void dccp_ackvec_exit(void) { kmem_cache_destroy(dccp_ackvec_slab); dccp_ackvec_slab = NULL; kmem_cache_destroy(dccp_ackvec_record_slab); dccp_ackvec_record_slab = NULL; } |
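/*
 * Illustrative sketch, not part of the original file: the per-packet receive
 * flow a DCCP socket follows with the helpers above -- register every
 * received packet in the live buffer, then record the Ack Vector just sent so
 * dccp_ackvec_clear_state() can garbage-collect it once the peer acks it.
 * The wrapper name and its arguments are hypothetical.
 */
static void example_rx_and_record(struct dccp_ackvec *av, struct sk_buff *skb,
				  u64 ack_seqno, u8 nonce_sum)
{
	/* extend/update the run-length encoded buffer for this packet */
	dccp_ackvec_input(av, skb);

	/* after emitting an Ack Vector option on the packet with @ack_seqno */
	if (dccp_ackvec_update_records(av, ack_seqno, nonce_sum))
		DCCP_WARN("failed to record sent Ack Vector\n");
}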
581 412 411 409 411 412 348 411 331 1502 331 1506 162 147 162 35 35 482 313 311 311 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 | // SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/atomic.h> #include <linux/inetdevice.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_nat_masquerade.h> struct masq_dev_work { struct work_struct work; struct net *net; netns_tracker ns_tracker; union nf_inet_addr addr; int ifindex; int (*iter)(struct nf_conn *i, void *data); }; #define MAX_MASQ_WORKER_COUNT 16 static DEFINE_MUTEX(masq_mutex); static unsigned int masq_refcnt __read_mostly; static atomic_t masq_worker_count __read_mostly; unsigned int nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, const struct nf_nat_range2 *range, const struct net_device *out) { struct nf_conn *ct; struct nf_conn_nat *nat; enum ip_conntrack_info ctinfo; struct nf_nat_range2 newrange; const struct rtable *rt; __be32 newsrc, nh; WARN_ON(hooknum != NF_INET_POST_ROUTING); ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. */ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) return NF_ACCEPT; rt = skb_rtable(skb); nh = rt_nexthop(rt, ip_hdr(skb)->daddr); newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); if (!newsrc) { pr_info("%s ate my IP address\n", out->name); return NF_DROP; } nat = nf_ct_nat_ext_add(ct); if (nat) nat->masq_index = out->ifindex; /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newsrc; newrange.max_addr.ip = newsrc; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; /* Hand modified range to generic setup. 
*/ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); static void iterate_cleanup_work(struct work_struct *work) { struct nf_ct_iter_data iter_data = {}; struct masq_dev_work *w; w = container_of(work, struct masq_dev_work, work); iter_data.net = w->net; iter_data.data = (void *)w; nf_ct_iterate_cleanup_net(w->iter, &iter_data); put_net_track(w->net, &w->ns_tracker); kfree(w); atomic_dec(&masq_worker_count); module_put(THIS_MODULE); } /* Iterate conntrack table in the background and remove conntrack entries * that use the device/address being removed. * * In case too many work items have been queued already or memory allocation * fails iteration is skipped, conntrack entries will time out eventually. */ static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr, int ifindex, int (*iter)(struct nf_conn *i, void *data), gfp_t gfp_flags) { struct masq_dev_work *w; if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT) return; net = maybe_get_net(net); if (!net) return; if (!try_module_get(THIS_MODULE)) goto err_module; w = kzalloc(sizeof(*w), gfp_flags); if (w) { /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */ atomic_inc(&masq_worker_count); INIT_WORK(&w->work, iterate_cleanup_work); w->ifindex = ifindex; w->net = net; netns_tracker_alloc(net, &w->ns_tracker, gfp_flags); w->iter = iter; if (addr) w->addr = *addr; schedule_work(&w->work); return; } module_put(THIS_MODULE); err_module: put_net(net); } static int device_cmp(struct nf_conn *i, void *arg) { const struct nf_conn_nat *nat = nfct_nat(i); const struct masq_dev_work *w = arg; if (!nat) return 0; return nat->masq_index == w->ifindex; } static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { /* Device was downed. Search entire table for * conntracks which were associated with that device, * and forget them. */ nf_nat_masq_schedule(net, NULL, dev->ifindex, device_cmp, GFP_KERNEL); } return NOTIFY_DONE; } static int inet_cmp(struct nf_conn *ct, void *ptr) { struct nf_conntrack_tuple *tuple; struct masq_dev_work *w = ptr; if (!device_cmp(ct, ptr)) return 0; tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3); } static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct in_ifaddr *ifa = ptr; const struct in_device *idev; const struct net_device *dev; union nf_inet_addr addr; if (event != NETDEV_DOWN) return NOTIFY_DONE; /* The masq_dev_notifier will catch the case of the device going * down. So if the inetdev is dead and being destroyed we have * no work to do. Otherwise this is an individual address removal * and we have to perform the flush. 
*/ idev = ifa->ifa_dev; if (idev->dead) return NOTIFY_DONE; memset(&addr, 0, sizeof(addr)); addr.ip = ifa->ifa_address; dev = idev->dev; nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex, inet_cmp, GFP_KERNEL); return NOTIFY_DONE; } static struct notifier_block masq_dev_notifier = { .notifier_call = masq_device_event, }; static struct notifier_block masq_inet_notifier = { .notifier_call = masq_inet_event, }; #if IS_ENABLED(CONFIG_IPV6) static int nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr) { #ifdef CONFIG_IPV6_MODULE const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return -EHOSTUNREACH; return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr); #else return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr); #endif } unsigned int nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, const struct net_device *out) { enum ip_conntrack_info ctinfo; struct nf_conn_nat *nat; struct in6_addr src; struct nf_conn *ct; struct nf_nat_range2 newrange; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out, &ipv6_hdr(skb)->daddr, 0, &src) < 0) return NF_DROP; nat = nf_ct_nat_ext_add(ct); if (nat) nat->masq_index = out->ifindex; newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.in6 = src; newrange.max_addr.in6 = src; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep). * * Defer it to the system workqueue. * * As we can have 'a lot' of inet_events (depending on amount of ipv6 * addresses being deleted), we also need to limit work item queue. 
*/ static int masq_inet6_event(struct notifier_block *this, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; const struct net_device *dev; union nf_inet_addr addr; if (event != NETDEV_DOWN) return NOTIFY_DONE; dev = ifa->idev->dev; memset(&addr, 0, sizeof(addr)); addr.in6 = ifa->addr; nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp, GFP_ATOMIC); return NOTIFY_DONE; } static struct notifier_block masq_inet6_notifier = { .notifier_call = masq_inet6_event, }; static int nf_nat_masquerade_ipv6_register_notifier(void) { return register_inet6addr_notifier(&masq_inet6_notifier); } #else static inline int nf_nat_masquerade_ipv6_register_notifier(void) { return 0; } #endif int nf_nat_masquerade_inet_register_notifiers(void) { int ret = 0; mutex_lock(&masq_mutex); if (WARN_ON_ONCE(masq_refcnt == UINT_MAX)) { ret = -EOVERFLOW; goto out_unlock; } /* check if the notifier was already set */ if (++masq_refcnt > 1) goto out_unlock; /* Register for device down reports */ ret = register_netdevice_notifier(&masq_dev_notifier); if (ret) goto err_dec; /* Register IP address change reports */ ret = register_inetaddr_notifier(&masq_inet_notifier); if (ret) goto err_unregister; ret = nf_nat_masquerade_ipv6_register_notifier(); if (ret) goto err_unreg_inet; mutex_unlock(&masq_mutex); return ret; err_unreg_inet: unregister_inetaddr_notifier(&masq_inet_notifier); err_unregister: unregister_netdevice_notifier(&masq_dev_notifier); err_dec: masq_refcnt--; out_unlock: mutex_unlock(&masq_mutex); return ret; } EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_register_notifiers); void nf_nat_masquerade_inet_unregister_notifiers(void) { mutex_lock(&masq_mutex); /* check if the notifiers still have clients */ if (--masq_refcnt > 0) goto out_unlock; unregister_netdevice_notifier(&masq_dev_notifier); unregister_inetaddr_notifier(&masq_inet_notifier); #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&masq_inet6_notifier); #endif out_unlock: mutex_unlock(&masq_mutex); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_unregister_notifiers); |
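/*
 * Illustrative sketch, not part of the original file: how a MASQUERADE
 * target module is expected to pair the notifier helpers above with its own
 * target registration, so conntrack entries tied to a downed interface or a
 * removed address get flushed.  The example_ function names are hypothetical
 * and the actual xtables/nft target registration is elided.
 */
static int __init example_masquerade_init(void)
{
	int ret;

	ret = nf_nat_masquerade_inet_register_notifiers();
	if (ret)
		return ret;
	/* ... register the xtables/nft MASQUERADE target here ... */
	return 0;
}

static void __exit example_masquerade_exit(void)
{
	/* ... unregister the target first ... */
	nf_nat_masquerade_inet_unregister_notifiers();
}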
2 5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2017 Google, Inc. */ #ifndef _LINUX_BINDER_ALLOC_H #define _LINUX_BINDER_ALLOC_H #include <linux/rbtree.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/rtmutex.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/list_lru.h> #include <uapi/linux/android/binder.h> extern struct list_lru binder_freelist; struct binder_transaction; /** * struct binder_buffer - buffer used for binder transactions * @entry: entry alloc->buffers * @rb_node: node for allocated_buffers/free_buffers rb trees * @free: %true if buffer is free * @clear_on_free: %true if buffer must be zeroed after use * @allow_user_free: %true if user is allowed to free buffer * @async_transaction: %true if buffer is in use for an async txn * @oneway_spam_suspect: %true if total async allocate size just exceed * spamming detect threshold * @debug_id: unique ID for debugging * @transaction: pointer to associated struct binder_transaction * @target_node: struct binder_node associated with this buffer * @data_size: size of @transaction data * @offsets_size: size of array of offsets * @extra_buffers_size: size of space for other objects (like sg lists) * @user_data: user pointer to base of buffer space * @pid: pid to attribute the buffer to (caller) * * Bookkeeping structure for binder transaction buffers */ struct binder_buffer { struct list_head entry; /* free and allocated entries by address */ struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; unsigned clear_on_free:1; unsigned allow_user_free:1; unsigned async_transaction:1; unsigned oneway_spam_suspect:1; unsigned debug_id:27; struct binder_transaction *transaction; struct binder_node *target_node; size_t data_size; size_t offsets_size; size_t extra_buffers_size; unsigned long user_data; int pid; }; /** * struct binder_shrinker_mdata - binder metadata used to reclaim pages * @lru: LRU entry in binder_freelist * @alloc: binder_alloc owning the page to reclaim * @page_index: offset in @alloc->pages[] into the page to reclaim */ struct binder_shrinker_mdata { struct list_head lru; struct binder_alloc *alloc; unsigned long page_index; }; static inline struct list_head *page_to_lru(struct page *p) { struct binder_shrinker_mdata *mdata; mdata = (struct binder_shrinker_mdata *)page_private(p); return &mdata->lru; } /** * struct binder_alloc - per-binder proc state for binder allocator * @mutex: protects binder_alloc fields * @mm: copy of task->mm (invariant after open) * @vm_start: base of per-proc address space mapped via mmap * @buffers: list of all buffers for this proc * @free_buffers: rb tree of buffers available for allocation * sorted by size * @allocated_buffers: rb tree of allocated buffers sorted by address * @free_async_space: VA space available for async buffers. 
This is * initialized at mmap time to 1/2 the full VA space * @pages: array of struct page * * @buffer_size: size of address space specified via mmap * @pid: pid for associated binder_proc (invariant after init) * @pages_high: high watermark of offset in @pages * @mapped: whether the vm area is mapped, each binder instance is * allowed a single mapping throughout its lifetime * @oneway_spam_detected: %true if oneway spam detection fired, clear that * flag once the async buffer has returned to a healthy state * * Bookkeeping structure for per-proc address space management for binder * buffers. It is normally initialized during binder_init() and binder_mmap() * calls. The address space is used for both user-visible buffers and for * struct binder_buffer objects used to track the user buffers */ struct binder_alloc { struct mutex mutex; struct mm_struct *mm; unsigned long vm_start; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; size_t free_async_space; struct page **pages; size_t buffer_size; int pid; size_t pages_high; bool mapped; bool oneway_spam_detected; }; #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST void binder_selftest_alloc(struct binder_alloc *alloc); #else static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} #endif enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, void *cb_arg); struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, int is_async); void binder_alloc_init(struct binder_alloc *alloc); int binder_alloc_shrinker_init(void); void binder_alloc_shrinker_exit(void); void binder_alloc_vma_close(struct binder_alloc *alloc); struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, unsigned long user_ptr); void binder_alloc_free_buf(struct binder_alloc *alloc, struct binder_buffer *buffer); int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma); void binder_alloc_deferred_release(struct binder_alloc *alloc); int binder_alloc_get_allocated_count(struct binder_alloc *alloc); void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc); void binder_alloc_print_pages(struct seq_file *m, struct binder_alloc *alloc); /** * binder_alloc_get_free_async_space() - get free space available for async * @alloc: binder_alloc for this proc * * Return: the bytes remaining in the address-space for async transactions */ static inline size_t binder_alloc_get_free_async_space(struct binder_alloc *alloc) { size_t free_async_space; mutex_lock(&alloc->mutex); free_async_space = alloc->free_async_space; mutex_unlock(&alloc->mutex); return free_async_space; } unsigned long binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer, binder_size_t buffer_offset, const void __user *from, size_t bytes); int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer, binder_size_t buffer_offset, void *src, size_t bytes); int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, void *dest, struct binder_buffer *buffer, binder_size_t buffer_offset, size_t bytes); #endif /* _LINUX_BINDER_ALLOC_H */ |
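/*
 * Hedged illustration, not part of binder_alloc.h: one way a caller could
 * use binder_alloc_get_free_async_space() declared above before admitting
 * an async transaction.  The wrapper name and the bare size comparison are
 * assumptions for the sketch; @free_async_space itself starts at half of
 * the mmap'ed address space, as documented in the struct above, and the
 * helper already takes alloc->mutex around the read.
 */
static inline bool binder_async_space_available(struct binder_alloc *alloc,
						size_t needed)
{
	return binder_alloc_get_free_async_space(alloc) >= needed;
}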
19 19 19 19 19 19 17 14 14 17 16 17 17 1 17 1 1 17 16 17 14 17 17 17 17 15 3 2 1 5 5 5 1 19 19 19 19 19 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 | /* inffast.c -- fast decoding * Copyright (C) 1995-2004 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include <linux/zutil.h> #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifndef ASMINF union uu { unsigned short us; unsigned char b[2]; }; /* Endian independent version */ static inline unsigned short get_unaligned16(const unsigned short *p) { union uu mm; unsigned char *b = (unsigned char *)p; mm.b[0] = b[0]; mm.b[1] = b[1]; return mm.us; } /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is available, an end-of-block is encountered, or a data error is encountered. When large enough input and output buffers are supplied to inflate(), for example, a 16K input buffer and a 64K output buffer, more than 95% of the inflate execution time is spent in this routine. Entry assumptions: state->mode == LEN strm->avail_in >= 6 strm->avail_out >= 258 start >= strm->avail_out state->bits < 8 On return, state->mode is one of: LEN -- ran out of enough output space or enough available input TYPE -- reached end of block code, inflate() to interpret next block BAD -- error in block data Notes: - The maximum input bits used by a length/distance pair is 15 bits for the length code, 5 bits for the length extra, 15 bits for the distance code, and 13 bits for the distance extra. This totals 48 bits, or six bytes. Therefore if strm->avail_in >= 6, then there is enough input to avoid checking for available input while decoding. - The maximum bytes that a single length/distance pair can output is 258 bytes, which is the maximum length that can be coded. inflate_fast() requires strm->avail_out >= 258 for each loop to avoid checking for output space. 
- @start: inflate()'s starting value for strm->avail_out */ void inflate_fast(z_streamp strm, unsigned start) { struct inflate_state *state; const unsigned char *in; /* local strm->next_in */ const unsigned char *last; /* while in < last, enough input available */ unsigned char *out; /* local strm->next_out */ unsigned char *beg; /* inflate()'s initial strm->next_out */ unsigned char *end; /* while out < end, enough space available */ #ifdef INFLATE_STRICT unsigned dmax; /* maximum distance from zlib header */ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned write; /* window write index */ unsigned char *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ code const *lcode; /* local strm->lencode */ code const *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ code this; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ unsigned dist; /* match distance */ unsigned char *from; /* where to copy match from */ /* copy state to local variables */ state = (struct inflate_state *)strm->state; in = strm->next_in; last = in + (strm->avail_in - 5); out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT dmax = state->dmax; #endif wsize = state->wsize; whave = state->whave; write = state->write; window = state->window; hold = state->hold; bits = state->bits; lcode = state->lencode; dcode = state->distcode; lmask = (1U << state->lenbits) - 1; dmask = (1U << state->distbits) - 1; /* decode literals and length/distances until end-of-block or not enough input data or output space */ do { if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; dolen: op = (unsigned)(this.bits); hold >>= op; bits -= op; op = (unsigned)(this.op); if (op == 0) { /* literal */ *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); hold >>= op; bits -= op; } if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; dodist: op = (unsigned)(this.bits); hold >>= op; bits -= op; op = (unsigned)(this.op); if (op & 16) { /* distance base */ dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } } dist += (unsigned)hold & ((1U << op) - 1); #ifdef INFLATE_STRICT if (dist > dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif hold >>= op; bits -= op; op = (unsigned)(out - beg); /* max distance in output */ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; 
do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } else if (write < op) { /* wrap around window */ from += wsize + write - op; op -= write; if (op < len) { /* some from end of window */ len -= op; do { *out++ = *from++; } while (--op); from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } } else { /* contiguous in window */ from += write - op; if (op < len) { /* some from window */ len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { *out++ = *from++; *out++ = *from++; *out++ = *from++; len -= 3; } if (len) { *out++ = *from++; if (len > 1) *out++ = *from++; } } else { unsigned short *sout; unsigned long loops; from = out - dist; /* copy direct from output */ /* minimum length is three */ /* Align out addr */ if (!((long)(out - 1) & 1)) { *out++ = *from++; len--; } sout = (unsigned short *)(out); if (dist > 2) { unsigned short *sfrom; sfrom = (unsigned short *)(from); loops = len >> 1; do { if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) *sout++ = *sfrom++; else *sout++ = get_unaligned16(sfrom++); } while (--loops); out = (unsigned char *)sout; from = (unsigned char *)sfrom; } else { /* dist == 1 or dist == 2 */ unsigned short pat16; pat16 = *(sout-1); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ mm.us = pat16; mm.b[0] = mm.b[1]; pat16 = mm.us; } loops = len >> 1; do *sout++ = pat16; while (--loops); out = (unsigned char *)sout; } if (len & 1) *out++ = *from++; } } else if ((op & 64) == 0) { /* 2nd level distance code */ this = dcode[this.val + (hold & ((1U << op) - 1))]; goto dodist; } else { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } } else if ((op & 64) == 0) { /* 2nd level length code */ this = lcode[this.val + (hold & ((1U << op) - 1))]; goto dolen; } else if (op & 32) { /* end-of-block */ state->mode = TYPE; break; } else { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } } while (in < last && out < end); /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ len = bits >> 3; in -= len; bits -= len << 3; hold &= (1U << bits) - 1; /* update state and return */ strm->next_in = in; strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); state->hold = hold; state->bits = bits; return; } /* inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - Three separate decoding do-loops for direct, window, and write == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes - Swapping literal/length else - Swapping window/direct else - Larger unrolled copy loops (three is about right) - Moving len -= 3 statement into middle of loop */ #endif /* !ASMINF */ |
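/*
 * Hedged standalone sketch, not part of inffast.c: the endian-independent
 * get_unaligned16() union trick used above, exercised in user space.  The
 * two single-byte loads are legal at any alignment and reassemble the same
 * 16-bit value a native load would return from the same memory.
 */
#include <stdio.h>

union uu_demo {
	unsigned short us;
	unsigned char b[2];
};

static unsigned short read16_any_alignment(const unsigned char *p)
{
	union uu_demo mm;

	mm.b[0] = p[0];		/* byte loads cannot trap on alignment */
	mm.b[1] = p[1];
	return mm.us;		/* value in host byte order */
}

int main(void)
{
	unsigned char buf[5] = { 0x00, 0x11, 0x22, 0x33, 0x44 };

	/* 16-bit read starting at the odd offset 1: 0x2211 on little endian */
	printf("0x%04x\n", read16_any_alignment(buf + 1));
	return 0;
}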
66 69 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 | /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> */ #ifndef _BR_PRIVATE_STP_H #define _BR_PRIVATE_STP_H #define BPDU_TYPE_CONFIG 0 #define BPDU_TYPE_TCN 0x80 /* IEEE 802.1D-1998 timer values */ #define BR_MIN_HELLO_TIME (1*HZ) #define BR_MAX_HELLO_TIME (10*HZ) #define BR_MIN_FORWARD_DELAY (2*HZ) #define BR_MAX_FORWARD_DELAY (30*HZ) #define BR_MIN_MAX_AGE (6*HZ) #define BR_MAX_MAX_AGE (40*HZ) #define BR_MIN_PATH_COST 1 #define BR_MAX_PATH_COST 65535 struct br_config_bpdu { unsigned int topology_change:1; unsigned int topology_change_ack:1; bridge_id root; int root_path_cost; bridge_id bridge_id; port_id port_id; int message_age; int max_age; int hello_time; int forward_delay; }; /* called under bridge lock */ static inline int br_is_designated_port(const struct net_bridge_port *p) { return !memcmp(&p->designated_bridge, &p->br->bridge_id, 8) && (p->designated_port == p->port_id); } /* br_stp.c */ void br_become_root_bridge(struct net_bridge *br); void br_config_bpdu_generation(struct net_bridge *); void br_configuration_update(struct net_bridge *); void br_port_state_selection(struct net_bridge *); void br_received_config_bpdu(struct net_bridge_port *p, const struct br_config_bpdu *bpdu); void br_received_tcn_bpdu(struct net_bridge_port *p); void br_transmit_config(struct net_bridge_port *p); void br_transmit_tcn(struct net_bridge *br); void br_topology_change_detection(struct net_bridge *br); void __br_set_topology_change(struct net_bridge *br, unsigned char val); /* br_stp_bpdu.c */ void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *); void br_send_tcn_bpdu(struct net_bridge_port *); #endif |
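/*
 * Hedged sketch, not part of br_private_stp.h: checking user-supplied STP
 * timers (already converted to jiffies) against the IEEE 802.1D-1998
 * bounds defined above.  The function name and calling convention are
 * assumptions for the example; only the bound macros themselves come from
 * the header.
 */
static inline bool br_stp_times_in_range(unsigned long hello_time,
					 unsigned long forward_delay,
					 unsigned long max_age)
{
	return hello_time >= BR_MIN_HELLO_TIME &&
	       hello_time <= BR_MAX_HELLO_TIME &&
	       forward_delay >= BR_MIN_FORWARD_DELAY &&
	       forward_delay <= BR_MAX_FORWARD_DELAY &&
	       max_age >= BR_MIN_MAX_AGE &&
	       max_age <= BR_MAX_MAX_AGE;
}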
11 11 11 11 11 15 15 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 | /* * linux/fs/nls/nls_cp865.c * * Charset cp865 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7, 0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5, /* 0x90*/ 0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9, 0x00ff, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8, 0x20a7, 0x0192, /* 0xa0*/ 0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba, 0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00a4, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 
0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x00b5, 0x03c4, 0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229, /* 0xf0*/ 0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0xad, 0x00, 0x9c, 0xaf, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */ 0x00, 0x00, 0xa7, 0x00, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */ 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */ 0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */ 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */ 0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x9b, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
/* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */ 0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */ }; static const unsigned char page23[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, page23, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 
0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 
0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp865", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_cp865(void) { return register_nls(&table); } static void __exit exit_nls_cp865(void) { unregister_nls(&table); } module_init(init_nls_cp865) module_exit(exit_nls_cp865) MODULE_DESCRIPTION("NLS Codepage 865 (Norwegian, Danish)"); MODULE_LICENSE("Dual BSD/GPL"); |
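/*
 * Hedged round-trip check, not part of the module: the same two-level
 * lookup that uni2char() implements, reduced to a single entry copied from
 * the tables above (CP865 byte 0x9b <-> U+00F8, LATIN SMALL LETTER O WITH
 * STROKE).  Everything prefixed "demo_" is an invention for the sketch.
 */
#include <assert.h>
#include <stddef.h>

static const unsigned char demo_page00[256] = { [0xf8] = 0x9b };	/* page00[0xf8] above */
static const unsigned char *const demo_pages[256] = { [0x00] = demo_page00 };

static int demo_uni2char(unsigned short uni, unsigned char *out)
{
	unsigned char cl = uni & 0x00ff;	/* low byte indexes into the page */
	unsigned char ch = uni >> 8;		/* high byte selects the page table */
	const unsigned char *page = demo_pages[ch];

	if (page && page[cl]) {
		*out = page[cl];
		return 1;
	}
	return -1;	/* the module returns -EINVAL for unmapped code points */
}

int main(void)
{
	unsigned char c;

	assert(demo_uni2char(0x00f8, &c) == 1 && c == 0x9b);	/* U+00F8 -> 0x9b */
	assert(demo_uni2char(0x20ac, &c) == -1);		/* euro: no CP865 mapping */
	return 0;
}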
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 | // SPDX-License-Identifier: GPL-2.0-or-later /* * cx2341x - generic code for cx23415/6/8 based devices * * Copyright (C) 2006 Hans Verkuil <hverkuil@xs4all.nl> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/tuner.h> #include <media/drv-intf/cx2341x.h> #include <media/v4l2-common.h> MODULE_DESCRIPTION("cx23415/6/8 driver"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /********************** COMMON CODE *********************/ /* definitions for audio properties bits 29-28 */ #define CX2341X_AUDIO_ENCODING_METHOD_MPEG 0 #define CX2341X_AUDIO_ENCODING_METHOD_AC3 1 #define CX2341X_AUDIO_ENCODING_METHOD_LPCM 2 static const char *cx2341x_get_name(u32 id) { switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: return "Spatial Filter Mode"; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: return "Spatial Filter"; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: return "Spatial Luma Filter Type"; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: return "Spatial Chroma Filter Type"; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: return "Temporal Filter Mode"; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: return "Temporal Filter"; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return "Median Filter Type"; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: return "Median Luma Filter Maximum"; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: return "Median Luma Filter Minimum"; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: return "Median Chroma Filter Maximum"; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: return "Median Chroma Filter Minimum"; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: return "Insert Navigation Packets"; } return NULL; } static const char **cx2341x_get_menu(u32 id) { static const char *cx2341x_video_spatial_filter_mode_menu[] = { "Manual", "Auto", NULL }; static const char *cx2341x_video_luma_spatial_filter_type_menu[] = { "Off", "1D Horizontal", "1D Vertical", "2D H/V Separable", "2D Symmetric non-separable", NULL }; static const char *cx2341x_video_chroma_spatial_filter_type_menu[] = { "Off", "1D Horizontal", NULL }; static const char *cx2341x_video_temporal_filter_mode_menu[] = { "Manual", "Auto", NULL }; static const char *cx2341x_video_median_filter_type_menu[] = { "Off", "Horizontal", "Vertical", "Horizontal/Vertical", "Diagonal", NULL }; switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: return cx2341x_video_spatial_filter_mode_menu; case 
V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: return cx2341x_video_luma_spatial_filter_type_menu; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: return cx2341x_video_chroma_spatial_filter_type_menu; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: return cx2341x_video_temporal_filter_mode_menu; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return cx2341x_video_median_filter_type_menu; } return NULL; } static void cx2341x_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags) { *name = cx2341x_get_name(id); *flags = 0; switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: *type = V4L2_CTRL_TYPE_MENU; *min = 0; *step = 0; break; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: *type = V4L2_CTRL_TYPE_BOOLEAN; *min = 0; *max = *step = 1; break; default: *type = V4L2_CTRL_TYPE_INTEGER; break; } switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: *flags |= V4L2_CTRL_FLAG_UPDATE; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: *flags |= V4L2_CTRL_FLAG_SLIDER; break; case V4L2_CID_MPEG_VIDEO_ENCODING: *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; } } /********************** OLD CODE *********************/ /* Must be sorted from low to high control ID! 
*/ const u32 cx2341x_mpeg_ctrls[] = { V4L2_CID_CODEC_CLASS, V4L2_CID_MPEG_STREAM_TYPE, V4L2_CID_MPEG_STREAM_VBI_FMT, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, V4L2_CID_MPEG_AUDIO_ENCODING, V4L2_CID_MPEG_AUDIO_L2_BITRATE, V4L2_CID_MPEG_AUDIO_MODE, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, V4L2_CID_MPEG_AUDIO_EMPHASIS, V4L2_CID_MPEG_AUDIO_CRC, V4L2_CID_MPEG_AUDIO_MUTE, V4L2_CID_MPEG_AUDIO_AC3_BITRATE, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_CID_MPEG_VIDEO_ASPECT, V4L2_CID_MPEG_VIDEO_B_FRAMES, V4L2_CID_MPEG_VIDEO_GOP_SIZE, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_CID_MPEG_VIDEO_BITRATE, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, V4L2_CID_MPEG_VIDEO_MUTE, V4L2_CID_MPEG_VIDEO_MUTE_YUV, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, 0 }; EXPORT_SYMBOL(cx2341x_mpeg_ctrls); static const struct cx2341x_mpeg_params default_params = { /* misc */ .capabilities = 0, .port = CX2341X_PORT_MEMORY, .width = 720, .height = 480, .is_50hz = 0, /* stream */ .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS, .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE, .stream_insert_nav_packets = 0, /* audio */ .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000, .audio_encoding = V4L2_MPEG_AUDIO_ENCODING_LAYER_2, .audio_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_224K, .audio_ac3_bitrate = V4L2_MPEG_AUDIO_AC3_BITRATE_224K, .audio_mode = V4L2_MPEG_AUDIO_MODE_STEREO, .audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, .audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE, .audio_crc = V4L2_MPEG_AUDIO_CRC_NONE, .audio_mute = 0, /* video */ .video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2, .video_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3, .video_b_frames = 2, .video_gop_size = 12, .video_gop_closure = 1, .video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, .video_bitrate = 6000000, .video_bitrate_peak = 8000000, .video_temporal_decimation = 0, .video_mute = 0, .video_mute_yuv = 0x008080, /* YCbCr value for black */ /* encoding filters */ .video_spatial_filter_mode = V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, .video_spatial_filter = 0, .video_luma_spatial_filter_type = V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR, .video_chroma_spatial_filter_type = V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, .video_temporal_filter_mode = V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, .video_temporal_filter = 8, .video_median_filter_type = V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, .video_luma_median_filter_top = 255, .video_luma_median_filter_bottom = 0, .video_chroma_median_filter_top = 255, .video_chroma_median_filter_bottom = 0, }; /* Map the control ID to the correct field in the cx2341x_mpeg_params struct. Return -EINVAL if the ID is unknown, else return 0. 
*/ static int cx2341x_get_ctrl(const struct cx2341x_mpeg_params *params, struct v4l2_ext_control *ctrl) { switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: ctrl->value = params->audio_sampling_freq; break; case V4L2_CID_MPEG_AUDIO_ENCODING: ctrl->value = params->audio_encoding; break; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: ctrl->value = params->audio_l2_bitrate; break; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: ctrl->value = params->audio_ac3_bitrate; break; case V4L2_CID_MPEG_AUDIO_MODE: ctrl->value = params->audio_mode; break; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: ctrl->value = params->audio_mode_extension; break; case V4L2_CID_MPEG_AUDIO_EMPHASIS: ctrl->value = params->audio_emphasis; break; case V4L2_CID_MPEG_AUDIO_CRC: ctrl->value = params->audio_crc; break; case V4L2_CID_MPEG_AUDIO_MUTE: ctrl->value = params->audio_mute; break; case V4L2_CID_MPEG_VIDEO_ENCODING: ctrl->value = params->video_encoding; break; case V4L2_CID_MPEG_VIDEO_ASPECT: ctrl->value = params->video_aspect; break; case V4L2_CID_MPEG_VIDEO_B_FRAMES: ctrl->value = params->video_b_frames; break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctrl->value = params->video_gop_size; break; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: ctrl->value = params->video_gop_closure; break; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: ctrl->value = params->video_bitrate_mode; break; case V4L2_CID_MPEG_VIDEO_BITRATE: ctrl->value = params->video_bitrate; break; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: ctrl->value = params->video_bitrate_peak; break; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: ctrl->value = params->video_temporal_decimation; break; case V4L2_CID_MPEG_VIDEO_MUTE: ctrl->value = params->video_mute; break; case V4L2_CID_MPEG_VIDEO_MUTE_YUV: ctrl->value = params->video_mute_yuv; break; case V4L2_CID_MPEG_STREAM_TYPE: ctrl->value = params->stream_type; break; case V4L2_CID_MPEG_STREAM_VBI_FMT: ctrl->value = params->stream_vbi_fmt; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: ctrl->value = params->video_spatial_filter_mode; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: ctrl->value = params->video_spatial_filter; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: ctrl->value = params->video_luma_spatial_filter_type; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: ctrl->value = params->video_chroma_spatial_filter_type; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: ctrl->value = params->video_temporal_filter_mode; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: ctrl->value = params->video_temporal_filter; break; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: ctrl->value = params->video_median_filter_type; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: ctrl->value = params->video_luma_median_filter_top; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: ctrl->value = params->video_luma_median_filter_bottom; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: ctrl->value = params->video_chroma_median_filter_top; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: ctrl->value = params->video_chroma_median_filter_bottom; break; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: ctrl->value = params->stream_insert_nav_packets; break; default: return -EINVAL; } return 0; } /* Map the control ID to the correct field in the cx2341x_mpeg_params struct. Return -EINVAL if the ID is unknown, else return 0. 
*/ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, int busy, struct v4l2_ext_control *ctrl) { switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: if (busy) return -EBUSY; params->audio_sampling_freq = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_ENCODING: if (busy) return -EBUSY; if (params->capabilities & CX2341X_CAP_HAS_AC3) if (ctrl->value != V4L2_MPEG_AUDIO_ENCODING_LAYER_2 && ctrl->value != V4L2_MPEG_AUDIO_ENCODING_AC3) return -ERANGE; params->audio_encoding = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: if (busy) return -EBUSY; params->audio_l2_bitrate = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: if (busy) return -EBUSY; if (!(params->capabilities & CX2341X_CAP_HAS_AC3)) return -EINVAL; params->audio_ac3_bitrate = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_MODE: params->audio_mode = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: params->audio_mode_extension = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_EMPHASIS: params->audio_emphasis = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_CRC: params->audio_crc = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_MUTE: params->audio_mute = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_ASPECT: params->video_aspect = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_B_FRAMES: { int b = ctrl->value + 1; int gop = params->video_gop_size; params->video_b_frames = ctrl->value; params->video_gop_size = b * ((gop + b - 1) / b); /* Max GOP size = 34 */ while (params->video_gop_size > 34) params->video_gop_size -= b; break; } case V4L2_CID_MPEG_VIDEO_GOP_SIZE: { int b = params->video_b_frames + 1; int gop = ctrl->value; params->video_gop_size = b * ((gop + b - 1) / b); /* Max GOP size = 34 */ while (params->video_gop_size > 34) params->video_gop_size -= b; ctrl->value = params->video_gop_size; break; } case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: params->video_gop_closure = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: if (busy) return -EBUSY; /* MPEG-1 only allows CBR */ if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1 && ctrl->value != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) return -EINVAL; params->video_bitrate_mode = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE: if (busy) return -EBUSY; params->video_bitrate = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: if (busy) return -EBUSY; params->video_bitrate_peak = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: params->video_temporal_decimation = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_MUTE: params->video_mute = (ctrl->value != 0); break; case V4L2_CID_MPEG_VIDEO_MUTE_YUV: params->video_mute_yuv = ctrl->value; break; case V4L2_CID_MPEG_STREAM_TYPE: if (busy) return -EBUSY; params->stream_type = ctrl->value; params->video_encoding = (params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_SS || params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ? 
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 : V4L2_MPEG_VIDEO_ENCODING_MPEG_2; if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) /* MPEG-1 implies CBR */ params->video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; break; case V4L2_CID_MPEG_STREAM_VBI_FMT: params->stream_vbi_fmt = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: params->video_spatial_filter_mode = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: params->video_spatial_filter = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: params->video_luma_spatial_filter_type = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: params->video_chroma_spatial_filter_type = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: params->video_temporal_filter_mode = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: params->video_temporal_filter = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: params->video_median_filter_type = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: params->video_luma_median_filter_top = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: params->video_luma_median_filter_bottom = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: params->video_chroma_median_filter_top = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: params->video_chroma_median_filter_bottom = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: params->stream_insert_nav_packets = ctrl->value; break; default: return -EINVAL; } return 0; } static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def) { const char *name; switch (qctrl->id) { /* MPEG controls */ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: cx2341x_ctrl_fill(qctrl->id, &name, &qctrl->type, &min, &max, &step, &def, &qctrl->flags); qctrl->minimum = min; qctrl->maximum = max; qctrl->step = step; qctrl->default_value = def; qctrl->reserved[0] = qctrl->reserved[1] = 0; strscpy(qctrl->name, name, sizeof(qctrl->name)); return 0; default: return v4l2_ctrl_query_fill(qctrl, min, max, step, def); } } int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params, struct v4l2_queryctrl *qctrl) { int err; switch (qctrl->id) { case V4L2_CID_CODEC_CLASS: return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0); case V4L2_CID_MPEG_STREAM_TYPE: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_STREAM_TYPE_MPEG2_PS, V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS); case V4L2_CID_MPEG_STREAM_VBI_FMT: if (params->capabilities & CX2341X_CAP_HAS_SLICED_VBI) return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_STREAM_VBI_FMT_NONE, V4L2_MPEG_STREAM_VBI_FMT_IVTV, 1, V4L2_MPEG_STREAM_VBI_FMT_NONE); return cx2341x_ctrl_query_fill(qctrl, 
V4L2_MPEG_STREAM_VBI_FMT_NONE, V4L2_MPEG_STREAM_VBI_FMT_NONE, 1, default_params.stream_vbi_fmt); case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100, V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 1, V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000); case V4L2_CID_MPEG_AUDIO_ENCODING: if (params->capabilities & CX2341X_CAP_HAS_AC3) { /* * The state of L2 & AC3 bitrate controls can change * when this control changes, but v4l2_ctrl_query_fill() * already sets V4L2_CTRL_FLAG_UPDATE for * V4L2_CID_MPEG_AUDIO_ENCODING, so we don't here. */ return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_ENCODING_LAYER_2, V4L2_MPEG_AUDIO_ENCODING_AC3, 1, default_params.audio_encoding); } return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_ENCODING_LAYER_2, V4L2_MPEG_AUDIO_ENCODING_LAYER_2, 1, default_params.audio_encoding); case V4L2_CID_MPEG_AUDIO_L2_BITRATE: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_L2_BITRATE_192K, V4L2_MPEG_AUDIO_L2_BITRATE_384K, 1, default_params.audio_l2_bitrate); if (err) return err; if (params->capabilities & CX2341X_CAP_HAS_AC3 && params->audio_encoding != V4L2_MPEG_AUDIO_ENCODING_LAYER_2) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_AUDIO_MODE: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_MODE_STEREO, V4L2_MPEG_AUDIO_MODE_MONO, 1, V4L2_MPEG_AUDIO_MODE_STEREO); case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 1, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4); if (err == 0 && params->audio_mode != V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; case V4L2_CID_MPEG_AUDIO_EMPHASIS: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_EMPHASIS_NONE, V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 1, V4L2_MPEG_AUDIO_EMPHASIS_NONE); case V4L2_CID_MPEG_AUDIO_CRC: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_CRC_NONE, V4L2_MPEG_AUDIO_CRC_CRC16, 1, V4L2_MPEG_AUDIO_CRC_NONE); case V4L2_CID_MPEG_AUDIO_MUTE: return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_AC3_BITRATE_48K, V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 1, default_params.audio_ac3_bitrate); if (err) return err; if (params->capabilities & CX2341X_CAP_HAS_AC3) { if (params->audio_encoding != V4L2_MPEG_AUDIO_ENCODING_AC3) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; } else qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; return 0; case V4L2_CID_MPEG_VIDEO_ENCODING: /* this setting is read-only for the cx2341x since the V4L2_CID_MPEG_STREAM_TYPE really determines the MPEG-1/2 setting */ err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_ENCODING_MPEG_1, V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 1, V4L2_MPEG_VIDEO_ENCODING_MPEG_2); if (err == 0) qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; return err; case V4L2_CID_MPEG_VIDEO_ASPECT: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_ASPECT_1x1, V4L2_MPEG_VIDEO_ASPECT_221x100, 1, V4L2_MPEG_VIDEO_ASPECT_4x3); case V4L2_CID_MPEG_VIDEO_B_FRAMES: return v4l2_ctrl_query_fill(qctrl, 0, 33, 1, 2); case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return v4l2_ctrl_query_fill(qctrl, 1, 34, 1, params->is_50hz ? 
12 : 15); case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 1); case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 1, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); if (err == 0 && params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; case V4L2_CID_MPEG_VIDEO_BITRATE: return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 6000000); case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: err = v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000); if (err == 0 && params->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); case V4L2_CID_MPEG_VIDEO_MUTE: return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); case V4L2_CID_MPEG_VIDEO_MUTE_YUV: /* Init YUV (really YCbCr) to black */ return v4l2_ctrl_query_fill(qctrl, 0, 0xffffff, 1, 0x008080); /* CX23415/6 specific */ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: return cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 1, default_params.video_spatial_filter_mode); case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: cx2341x_ctrl_query_fill(qctrl, 0, 15, 1, default_params.video_spatial_filter); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_spatial_filter_mode == V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE, 1, default_params.video_luma_spatial_filter_type); if (params->video_spatial_filter_mode == V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, 1, default_params.video_chroma_spatial_filter_type); if (params->video_spatial_filter_mode == V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: return cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, 1, default_params.video_temporal_filter_mode); case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: cx2341x_ctrl_query_fill(qctrl, 0, 31, 1, default_params.video_temporal_filter); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_temporal_filter_mode == V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, 1, default_params.video_median_filter_type); case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_luma_median_filter_top); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case 
V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_luma_median_filter_bottom); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_chroma_median_filter_top); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_chroma_median_filter_bottom); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: return cx2341x_ctrl_query_fill(qctrl, 0, 1, 1, default_params.stream_insert_nav_packets); default: return -EINVAL; } } EXPORT_SYMBOL(cx2341x_ctrl_query); const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id) { static const char * const mpeg_stream_type_without_ts[] = { "MPEG-2 Program Stream", "", "MPEG-1 System Stream", "MPEG-2 DVD-compatible Stream", "MPEG-1 VCD-compatible Stream", "MPEG-2 SVCD-compatible Stream", NULL }; static const char *mpeg_stream_type_with_ts[] = { "MPEG-2 Program Stream", "MPEG-2 Transport Stream", "MPEG-1 System Stream", "MPEG-2 DVD-compatible Stream", "MPEG-1 VCD-compatible Stream", "MPEG-2 SVCD-compatible Stream", NULL }; static const char *mpeg_audio_encoding_l2_ac3[] = { "", "MPEG-1/2 Layer II", "", "", "AC-3", NULL }; switch (id) { case V4L2_CID_MPEG_STREAM_TYPE: return (p->capabilities & CX2341X_CAP_HAS_TS) ? mpeg_stream_type_with_ts : mpeg_stream_type_without_ts; case V4L2_CID_MPEG_AUDIO_ENCODING: return (p->capabilities & CX2341X_CAP_HAS_AC3) ? mpeg_audio_encoding_l2_ac3 : v4l2_ctrl_get_menu(id); case V4L2_CID_MPEG_AUDIO_L1_BITRATE: case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return NULL; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return cx2341x_get_menu(id); default: return v4l2_ctrl_get_menu(id); } } EXPORT_SYMBOL(cx2341x_ctrl_get_menu); static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params) { params->audio_properties = (params->audio_sampling_freq << 0) | (params->audio_mode << 8) | (params->audio_mode_extension << 10) | (((params->audio_emphasis == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17) ? 3 : params->audio_emphasis) << 12) | (params->audio_crc << 14); if ((params->capabilities & CX2341X_CAP_HAS_AC3) && params->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3) { params->audio_properties |= /* Not sure if this MPEG Layer II setting is required */ ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) | (params->audio_ac3_bitrate << 4) | (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28); } else { /* Assuming MPEG Layer II */ params->audio_properties |= ((3 - params->audio_encoding) << 2) | ((1 + params->audio_l2_bitrate) << 4); } } /* Check for correctness of the ctrl's value based on the data from struct v4l2_queryctrl and the available menu items. 
Note that menu_items may be NULL, in that case it is ignored. */ static int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, const char * const *menu_items) { if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED) return -EINVAL; if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED) return -EBUSY; if (qctrl->type == V4L2_CTRL_TYPE_STRING) return 0; if (qctrl->type == V4L2_CTRL_TYPE_BUTTON || qctrl->type == V4L2_CTRL_TYPE_INTEGER64 || qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) return 0; if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum) return -ERANGE; if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) { if (menu_items[ctrl->value] == NULL || menu_items[ctrl->value][0] == '\0') return -EINVAL; } if (qctrl->type == V4L2_CTRL_TYPE_BITMASK && (ctrl->value & ~qctrl->maximum)) return -ERANGE; return 0; } int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy, struct v4l2_ext_controls *ctrls, unsigned int cmd) { int err = 0; int i; if (cmd == VIDIOC_G_EXT_CTRLS) { for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; err = cx2341x_get_ctrl(params, ctrl); if (err) { ctrls->error_idx = i; break; } } return err; } for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; struct v4l2_queryctrl qctrl; const char * const *menu_items = NULL; qctrl.id = ctrl->id; err = cx2341x_ctrl_query(params, &qctrl); if (err) break; if (qctrl.type == V4L2_CTRL_TYPE_MENU) menu_items = cx2341x_ctrl_get_menu(params, qctrl.id); err = v4l2_ctrl_check(ctrl, &qctrl, menu_items); if (err) break; err = cx2341x_set_ctrl(params, busy, ctrl); if (err) break; } if (err == 0 && params->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && params->video_bitrate_peak < params->video_bitrate) { err = -ERANGE; ctrls->error_idx = ctrls->count; } if (err) ctrls->error_idx = i; else cx2341x_calc_audio_properties(params); return err; } EXPORT_SYMBOL(cx2341x_ext_ctrls); void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p) { *p = default_params; cx2341x_calc_audio_properties(p); } EXPORT_SYMBOL(cx2341x_fill_defaults); static int cx2341x_api(void *priv, cx2341x_mbox_func func, u32 cmd, int args, ...) 
{ u32 data[CX2341X_MBOX_MAX_DATA]; va_list vargs; int i; va_start(vargs, args); for (i = 0; i < args; i++) data[i] = va_arg(vargs, int); va_end(vargs); return func(priv, cmd, args, 0, data); } #define CMP_FIELD(__old, __new, __field) (__old->__field != __new->__field) int cx2341x_update(void *priv, cx2341x_mbox_func func, const struct cx2341x_mpeg_params *old, const struct cx2341x_mpeg_params *new) { static int mpeg_stream_type[] = { 0, /* MPEG-2 PS */ 1, /* MPEG-2 TS */ 2, /* MPEG-1 SS */ 14, /* DVD */ 11, /* VCD */ 12, /* SVCD */ }; int err; cx2341x_api(priv, func, CX2341X_ENC_SET_OUTPUT_PORT, 2, new->port, 0); if (!old || CMP_FIELD(old, new, is_50hz)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_RATE, 1, new->is_50hz); if (err) return err; } if (!old || CMP_FIELD(old, new, width) || CMP_FIELD(old, new, height) || CMP_FIELD(old, new, video_encoding)) { u16 w = new->width; u16 h = new->height; if (new->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) { w /= 2; h /= 2; } err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w); if (err) return err; } if (!old || CMP_FIELD(old, new, stream_type)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[new->stream_type]); if (err) return err; } if (!old || CMP_FIELD(old, new, video_aspect)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_ASPECT_RATIO, 1, 1 + new->video_aspect); if (err) return err; } if (!old || CMP_FIELD(old, new, video_b_frames) || CMP_FIELD(old, new, video_gop_size)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_PROPERTIES, 2, new->video_gop_size, new->video_b_frames + 1); if (err) return err; } if (!old || CMP_FIELD(old, new, video_gop_closure)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_CLOSURE, 1, new->video_gop_closure); if (err) return err; } if (!old || CMP_FIELD(old, new, audio_properties)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new->audio_properties); if (err) return err; } if (!old || CMP_FIELD(old, new, audio_mute)) { err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1, new->audio_mute); if (err) return err; } if (!old || CMP_FIELD(old, new, video_bitrate_mode) || CMP_FIELD(old, new, video_bitrate) || CMP_FIELD(old, new, video_bitrate_peak)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_BIT_RATE, 5, new->video_bitrate_mode, new->video_bitrate, new->video_bitrate_peak / 400, 0, 0); if (err) return err; } if (!old || CMP_FIELD(old, new, video_spatial_filter_mode) || CMP_FIELD(old, new, video_temporal_filter_mode) || CMP_FIELD(old, new, video_median_filter_type)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_MODE, 2, new->video_spatial_filter_mode | (new->video_temporal_filter_mode << 1), new->video_median_filter_type); if (err) return err; } if (!old || CMP_FIELD(old, new, video_luma_median_filter_bottom) || CMP_FIELD(old, new, video_luma_median_filter_top) || CMP_FIELD(old, new, video_chroma_median_filter_bottom) || CMP_FIELD(old, new, video_chroma_median_filter_top)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_CORING_LEVELS, 4, new->video_luma_median_filter_bottom, new->video_luma_median_filter_top, new->video_chroma_median_filter_bottom, new->video_chroma_median_filter_top); if (err) return err; } if (!old || CMP_FIELD(old, new, video_luma_spatial_filter_type) || CMP_FIELD(old, new, video_chroma_spatial_filter_type)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2, new->video_luma_spatial_filter_type, new->video_chroma_spatial_filter_type); if (err) return err; } if (!old || 
CMP_FIELD(old, new, video_spatial_filter) || CMP_FIELD(old, new, video_temporal_filter)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2, new->video_spatial_filter, new->video_temporal_filter); if (err) return err; } if (!old || CMP_FIELD(old, new, video_temporal_decimation)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_DROP_RATE, 1, new->video_temporal_decimation); if (err) return err; } if (!old || CMP_FIELD(old, new, video_mute) || (new->video_mute && CMP_FIELD(old, new, video_mute_yuv))) { err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1, new->video_mute | (new->video_mute_yuv << 8)); if (err) return err; } if (!old || CMP_FIELD(old, new, stream_insert_nav_packets)) { err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2, 7, new->stream_insert_nav_packets); if (err) return err; } return 0; } EXPORT_SYMBOL(cx2341x_update); static const char *cx2341x_menu_item(const struct cx2341x_mpeg_params *p, u32 id) { const char * const *menu = cx2341x_ctrl_get_menu(p, id); struct v4l2_ext_control ctrl; if (menu == NULL) goto invalid; ctrl.id = id; if (cx2341x_get_ctrl(p, &ctrl)) goto invalid; while (ctrl.value-- && *menu) menu++; if (*menu == NULL) goto invalid; return *menu; invalid: return "<invalid>"; } void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix) { int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1; /* Stream */ printk(KERN_INFO "%s: Stream: %s", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); if (p->stream_insert_nav_packets) printk(KERN_CONT " (with navigation packets)"); printk(KERN_CONT "\n"); printk(KERN_INFO "%s: VBI Format: %s\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT)); /* Video */ printk(KERN_INFO "%s: Video: %dx%d, %d fps%s\n", prefix, p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), p->is_50hz ? 25 : 30, (p->video_mute) ? " (muted)" : ""); printk(KERN_INFO "%s: Video: %s, %s, %s, %d", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT), cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE), p->video_bitrate); if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) printk(KERN_CONT ", Peak %d", p->video_bitrate_peak); printk(KERN_CONT "\n"); printk(KERN_INFO "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure\n", prefix, p->video_gop_size, p->video_b_frames, p->video_gop_closure ? "" : "No "); if (p->video_temporal_decimation) printk(KERN_INFO "%s: Video: Temporal Decimation %d\n", prefix, p->video_temporal_decimation); /* Audio */ printk(KERN_INFO "%s: Audio: %s, %s, %s, %s%s", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), cx2341x_menu_item(p, p->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3 ? V4L2_CID_MPEG_AUDIO_AC3_BITRATE : V4L2_CID_MPEG_AUDIO_L2_BITRATE), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE), p->audio_mute ? 
" (muted)" : ""); if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) printk(KERN_CONT ", %s", cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION)); printk(KERN_CONT ", %s, %s\n", cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC)); /* Encoding filters */ printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE), cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE), cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE), p->video_spatial_filter); printk(KERN_INFO "%s: Temporal Filter: %s, %d\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE), p->video_temporal_filter); printk(KERN_INFO "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE), p->video_luma_median_filter_bottom, p->video_luma_median_filter_top, p->video_chroma_median_filter_bottom, p->video_chroma_median_filter_top); } EXPORT_SYMBOL(cx2341x_log_status); /********************** NEW CODE *********************/ static inline struct cx2341x_handler *to_cxhdl(struct v4l2_ctrl *ctrl) { return container_of(ctrl->handler, struct cx2341x_handler, hdl); } static int cx2341x_hdl_api(struct cx2341x_handler *hdl, u32 cmd, int args, ...) { u32 data[CX2341X_MBOX_MAX_DATA]; va_list vargs; int i; va_start(vargs, args); for (i = 0; i < args; i++) data[i] = va_arg(vargs, int); va_end(vargs); return hdl->func(hdl->priv, cmd, args, 0, data); } /* ctrl->handler->lock is held, so it is safe to access cur.val */ static inline int cx2341x_neq(struct v4l2_ctrl *ctrl) { return ctrl && ctrl->val != ctrl->cur.val; } static int cx2341x_try_ctrl(struct v4l2_ctrl *ctrl) { struct cx2341x_handler *hdl = to_cxhdl(ctrl); s32 val = ctrl->val; switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_B_FRAMES: { /* video gop cluster */ int b = val + 1; int gop = hdl->video_gop_size->val; gop = b * ((gop + b - 1) / b); /* Max GOP size = 34 */ while (gop > 34) gop -= b; hdl->video_gop_size->val = gop; break; } case V4L2_CID_MPEG_STREAM_TYPE: /* stream type cluster */ hdl->video_encoding->val = (hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_SS || hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ? 
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 : V4L2_MPEG_VIDEO_ENCODING_MPEG_2; if (hdl->video_encoding->val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) /* MPEG-1 implies CBR */ hdl->video_bitrate_mode->val = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; /* peak bitrate shall be >= normal bitrate */ if (hdl->video_bitrate_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && hdl->video_bitrate_peak->val < hdl->video_bitrate->val) hdl->video_bitrate_peak->val = hdl->video_bitrate->val; break; } return 0; } static int cx2341x_s_ctrl(struct v4l2_ctrl *ctrl) { static const int mpeg_stream_type[] = { 0, /* MPEG-2 PS */ 1, /* MPEG-2 TS */ 2, /* MPEG-1 SS */ 14, /* DVD */ 11, /* VCD */ 12, /* SVCD */ }; struct cx2341x_handler *hdl = to_cxhdl(ctrl); s32 val = ctrl->val; u32 props; int err; switch (ctrl->id) { case V4L2_CID_MPEG_STREAM_VBI_FMT: if (hdl->ops && hdl->ops->s_stream_vbi_fmt) return hdl->ops->s_stream_vbi_fmt(hdl, val); return 0; case V4L2_CID_MPEG_VIDEO_ASPECT: return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_ASPECT_RATIO, 1, val + 1); case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_CLOSURE, 1, val); case V4L2_CID_MPEG_AUDIO_MUTE: return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_AUDIO, 1, val); case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_FRAME_DROP_RATE, 1, val); case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: return cx2341x_hdl_api(hdl, CX2341X_ENC_MISC, 2, 7, val); case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: /* audio properties cluster */ props = (hdl->audio_sampling_freq->val << 0) | (hdl->audio_mode->val << 8) | (hdl->audio_mode_extension->val << 10) | (hdl->audio_crc->val << 14); if (hdl->audio_emphasis->val == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17) props |= 3 << 12; else props |= hdl->audio_emphasis->val << 12; if (hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3) { props |= #if 1 /* Not sure if this MPEG Layer II setting is required */ ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) | #endif (hdl->audio_ac3_bitrate->val << 4) | (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28); } else { /* Assuming MPEG Layer II */ props |= ((3 - hdl->audio_encoding->val) << 2) | ((1 + hdl->audio_l2_bitrate->val) << 4); } err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, props); if (err) return err; hdl->audio_properties = props; if (hdl->audio_ac3_bitrate) { int is_ac3 = hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3; v4l2_ctrl_activate(hdl->audio_ac3_bitrate, is_ac3); v4l2_ctrl_activate(hdl->audio_l2_bitrate, !is_ac3); } v4l2_ctrl_activate(hdl->audio_mode_extension, hdl->audio_mode->val == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO); if (cx2341x_neq(hdl->audio_sampling_freq) && hdl->ops && hdl->ops->s_audio_sampling_freq) return hdl->ops->s_audio_sampling_freq(hdl, hdl->audio_sampling_freq->val); if (cx2341x_neq(hdl->audio_mode) && hdl->ops && hdl->ops->s_audio_mode) return hdl->ops->s_audio_mode(hdl, hdl->audio_mode->val); return 0; case V4L2_CID_MPEG_VIDEO_B_FRAMES: /* video gop cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_PROPERTIES, 2, hdl->video_gop_size->val, hdl->video_b_frames->val + 1); case V4L2_CID_MPEG_STREAM_TYPE: /* stream type cluster */ err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[val]); if (err) return err; err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_BIT_RATE, 5, hdl->video_bitrate_mode->val, hdl->video_bitrate->val, hdl->video_bitrate_peak->val / 400, 0, 0); if (err) return err; v4l2_ctrl_activate(hdl->video_bitrate_mode, hdl->video_encoding->val != V4L2_MPEG_VIDEO_ENCODING_MPEG_1); 
v4l2_ctrl_activate(hdl->video_bitrate_peak, hdl->video_bitrate_mode->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); if (cx2341x_neq(hdl->video_encoding) && hdl->ops && hdl->ops->s_video_encoding) return hdl->ops->s_video_encoding(hdl, hdl->video_encoding->val); return 0; case V4L2_CID_MPEG_VIDEO_MUTE: /* video mute cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_VIDEO, 1, hdl->video_mute->val | (hdl->video_mute_yuv->val << 8)); case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: { int active_filter; /* video filter mode */ err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_MODE, 2, hdl->video_spatial_filter_mode->val | (hdl->video_temporal_filter_mode->val << 1), hdl->video_median_filter_type->val); if (err) return err; active_filter = hdl->video_spatial_filter_mode->val != V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO; v4l2_ctrl_activate(hdl->video_spatial_filter, active_filter); v4l2_ctrl_activate(hdl->video_luma_spatial_filter_type, active_filter); v4l2_ctrl_activate(hdl->video_chroma_spatial_filter_type, active_filter); active_filter = hdl->video_temporal_filter_mode->val != V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO; v4l2_ctrl_activate(hdl->video_temporal_filter, active_filter); active_filter = hdl->video_median_filter_type->val != V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF; v4l2_ctrl_activate(hdl->video_luma_median_filter_bottom, active_filter); v4l2_ctrl_activate(hdl->video_luma_median_filter_top, active_filter); v4l2_ctrl_activate(hdl->video_chroma_median_filter_bottom, active_filter); v4l2_ctrl_activate(hdl->video_chroma_median_filter_top, active_filter); return 0; } case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: /* video filter type cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2, hdl->video_luma_spatial_filter_type->val, hdl->video_chroma_spatial_filter_type->val); case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: /* video filter cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2, hdl->video_spatial_filter->val, hdl->video_temporal_filter->val); case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: /* video median cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_CORING_LEVELS, 4, hdl->video_luma_median_filter_bottom->val, hdl->video_luma_median_filter_top->val, hdl->video_chroma_median_filter_bottom->val, hdl->video_chroma_median_filter_top->val); } return -EINVAL; } static const struct v4l2_ctrl_ops cx2341x_ops = { .try_ctrl = cx2341x_try_ctrl, .s_ctrl = cx2341x_s_ctrl, }; static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, u32 id, s32 min, s32 max, s32 step, s32 def) { struct v4l2_ctrl_config cfg; memset(&cfg, 0, sizeof(cfg)); cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); cfg.ops = &cx2341x_ops; cfg.id = id; cfg.min = min; cfg.max = max; cfg.def = def; if (cfg.type == V4L2_CTRL_TYPE_MENU) { cfg.step = 0; cfg.menu_skip_mask = step; cfg.qmenu = cx2341x_get_menu(id); } else { cfg.step = step; cfg.menu_skip_mask = 0; } return v4l2_ctrl_new_custom(hdl, &cfg, NULL); } static struct v4l2_ctrl *cx2341x_ctrl_new_std(struct v4l2_ctrl_handler *hdl, u32 id, s32 min, s32 max, s32 step, s32 def) { return v4l2_ctrl_new_std(hdl, &cx2341x_ops, id, min, max, step, def); } static struct v4l2_ctrl *cx2341x_ctrl_new_menu(struct v4l2_ctrl_handler *hdl, u32 id, s32 max, s32 mask, s32 def) { return v4l2_ctrl_new_std_menu(hdl, &cx2341x_ops, id, max, mask, def); } int cx2341x_handler_init(struct cx2341x_handler *cxhdl, unsigned 
nr_of_controls_hint) { struct v4l2_ctrl_handler *hdl = &cxhdl->hdl; u32 caps = cxhdl->capabilities; int has_sliced_vbi = caps & CX2341X_CAP_HAS_SLICED_VBI; int has_ac3 = caps & CX2341X_CAP_HAS_AC3; int has_ts = caps & CX2341X_CAP_HAS_TS; cxhdl->width = 720; cxhdl->height = 480; v4l2_ctrl_handler_init(hdl, nr_of_controls_hint); /* Add controls in ascending control ID order for fastest insertion time. */ cxhdl->stream_type = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_STREAM_TYPE, V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, has_ts ? 0 : 2, V4L2_MPEG_STREAM_TYPE_MPEG2_PS); cxhdl->stream_vbi_fmt = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_STREAM_VBI_FMT, V4L2_MPEG_STREAM_VBI_FMT_IVTV, has_sliced_vbi ? 0 : 2, V4L2_MPEG_STREAM_VBI_FMT_NONE); cxhdl->audio_sampling_freq = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 0, V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000); cxhdl->audio_encoding = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_ENCODING, V4L2_MPEG_AUDIO_ENCODING_AC3, has_ac3 ? ~0x12 : ~0x2, V4L2_MPEG_AUDIO_ENCODING_LAYER_2); cxhdl->audio_l2_bitrate = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_L2_BITRATE, V4L2_MPEG_AUDIO_L2_BITRATE_384K, 0x1ff, V4L2_MPEG_AUDIO_L2_BITRATE_224K); cxhdl->audio_mode = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_MODE, V4L2_MPEG_AUDIO_MODE_MONO, 0, V4L2_MPEG_AUDIO_MODE_STEREO); cxhdl->audio_mode_extension = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 0, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4); cxhdl->audio_emphasis = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_EMPHASIS, V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 0, V4L2_MPEG_AUDIO_EMPHASIS_NONE); cxhdl->audio_crc = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_CRC, V4L2_MPEG_AUDIO_CRC_CRC16, 0, V4L2_MPEG_AUDIO_CRC_NONE); cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_AUDIO_MUTE, 0, 1, 1, 0); if (has_ac3) cxhdl->audio_ac3_bitrate = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_AC3_BITRATE, V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 0x03, V4L2_MPEG_AUDIO_AC3_BITRATE_224K); cxhdl->video_encoding = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 0, V4L2_MPEG_VIDEO_ENCODING_MPEG_2); cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_VIDEO_ASPECT, V4L2_MPEG_VIDEO_ASPECT_221x100, 0, V4L2_MPEG_VIDEO_ASPECT_4x3); cxhdl->video_b_frames = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 33, 1, 2); cxhdl->video_gop_size = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 34, 1, cxhdl->is_50hz ? 
12 : 15); cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, 0, 1, 1, 1); cxhdl->video_bitrate_mode = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); cxhdl->video_bitrate = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_BITRATE, 0, 27000000, 1, 6000000); cxhdl->video_bitrate_peak = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, 0, 27000000, 1, 8000000); cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, 0, 255, 1, 0); cxhdl->video_mute = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_MUTE, 0, 1, 1, 0); cxhdl->video_mute_yuv = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_MUTE_YUV, 0, 0xffffff, 1, 0x008080); /* CX23415/6 specific */ cxhdl->video_spatial_filter_mode = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 0, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL); cxhdl->video_spatial_filter = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, 0, 15, 1, 0); cxhdl->video_luma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE, 0, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR); cxhdl->video_chroma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, 0, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR); cxhdl->video_temporal_filter_mode = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, 0, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL); cxhdl->video_temporal_filter = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, 0, 31, 1, 8); cxhdl->video_median_filter_type = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, 0, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF); cxhdl->video_luma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, 0, 255, 1, 0); cxhdl->video_luma_median_filter_top = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, 0, 255, 1, 255); cxhdl->video_chroma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, 0, 255, 1, 0); cxhdl->video_chroma_median_filter_top = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, 0, 255, 1, 255); cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, 0, 1, 1, 0); if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } v4l2_ctrl_cluster(8, &cxhdl->audio_sampling_freq); v4l2_ctrl_cluster(2, &cxhdl->video_b_frames); v4l2_ctrl_cluster(5, &cxhdl->stream_type); v4l2_ctrl_cluster(2, &cxhdl->video_mute); v4l2_ctrl_cluster(3, &cxhdl->video_spatial_filter_mode); v4l2_ctrl_cluster(2, &cxhdl->video_luma_spatial_filter_type); v4l2_ctrl_cluster(2, &cxhdl->video_spatial_filter); v4l2_ctrl_cluster(4, 
&cxhdl->video_luma_median_filter_top); return 0; } EXPORT_SYMBOL(cx2341x_handler_init); void cx2341x_handler_set_50hz(struct cx2341x_handler *cxhdl, int is_50hz) { cxhdl->is_50hz = is_50hz; cxhdl->video_gop_size->default_value = cxhdl->is_50hz ? 12 : 15; } EXPORT_SYMBOL(cx2341x_handler_set_50hz); int cx2341x_handler_setup(struct cx2341x_handler *cxhdl) { int h = cxhdl->height; int w = cxhdl->width; int err; err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_OUTPUT_PORT, 2, cxhdl->port, 0); if (err) return err; err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_RATE, 1, cxhdl->is_50hz); if (err) return err; if (v4l2_ctrl_g_ctrl(cxhdl->video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) { w /= 2; h /= 2; } err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w); if (err) return err; return v4l2_ctrl_handler_setup(&cxhdl->hdl); } EXPORT_SYMBOL(cx2341x_handler_setup); void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy) { v4l2_ctrl_grab(cxhdl->audio_sampling_freq, busy); v4l2_ctrl_grab(cxhdl->audio_encoding, busy); v4l2_ctrl_grab(cxhdl->audio_l2_bitrate, busy); v4l2_ctrl_grab(cxhdl->audio_ac3_bitrate, busy); v4l2_ctrl_grab(cxhdl->stream_vbi_fmt, busy); v4l2_ctrl_grab(cxhdl->stream_type, busy); v4l2_ctrl_grab(cxhdl->video_bitrate_mode, busy); v4l2_ctrl_grab(cxhdl->video_bitrate, busy); v4l2_ctrl_grab(cxhdl->video_bitrate_peak, busy); } EXPORT_SYMBOL(cx2341x_handler_set_busy); |
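/*
 * Illustrative sketch (not part of the driver): the GOP-size arithmetic used
 * above in cx2341x_set_ctrl() and cx2341x_try_ctrl() rounds the requested GOP
 * size up to a multiple of (B-frames + 1) and then steps it back down in the
 * same increments until it fits the hardware limit of 34 frames per GOP. The
 * helper below is a hypothetical, stand-alone user-space illustration of that
 * rule, kept separate so the worked values are easy to check.
 */
#include <stdio.h>

static int cx2341x_round_gop_example(int gop, int b_frames)
{
	int b = b_frames + 1;			/* distance between anchor frames */
	int rounded = b * ((gop + b - 1) / b);	/* round up to a multiple of b */

	while (rounded > 34)			/* enforce the 34-frame GOP cap */
		rounded -= b;
	return rounded;
}

int main(void)
{
	/* 2 B-frames, GOP of 15: already a multiple of 3, stays 15 */
	printf("%d\n", cx2341x_round_gop_example(15, 2));
	/* 2 B-frames, GOP of 34: rounds up to 36, then steps down to 33 */
	printf("%d\n", cx2341x_round_gop_example(34, 2));
	return 0;
}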
// SPDX-License-Identifier: GPL-2.0-only /* * NFC hardware simulation driver * Copyright (c) 2013, Intel Corporation. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include <net/nfc/digital.h> #define NFCSIM_ERR(d, fmt, args...) nfc_err(&d->nfc_digital_dev->nfc_dev->dev, \ "%s: " fmt, __func__, ## args) #define NFCSIM_DBG(d, fmt, args...) 
dev_dbg(&d->nfc_digital_dev->nfc_dev->dev, \ "%s: " fmt, __func__, ## args) #define NFCSIM_VERSION "0.2" #define NFCSIM_MODE_NONE 0 #define NFCSIM_MODE_INITIATOR 1 #define NFCSIM_MODE_TARGET 2 #define NFCSIM_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \ NFC_DIGITAL_DRV_CAPS_TG_CRC) struct nfcsim { struct nfc_digital_dev *nfc_digital_dev; struct work_struct recv_work; struct delayed_work send_work; struct nfcsim_link *link_in; struct nfcsim_link *link_out; bool up; u8 mode; u8 rf_tech; u16 recv_timeout; nfc_digital_cmd_complete_t cb; void *arg; u8 dropframe; }; struct nfcsim_link { struct mutex lock; u8 rf_tech; u8 mode; u8 shutdown; struct sk_buff *skb; wait_queue_head_t recv_wait; u8 cond; }; static struct nfcsim_link *nfcsim_link_new(void) { struct nfcsim_link *link; link = kzalloc(sizeof(struct nfcsim_link), GFP_KERNEL); if (!link) return NULL; mutex_init(&link->lock); init_waitqueue_head(&link->recv_wait); return link; } static void nfcsim_link_free(struct nfcsim_link *link) { dev_kfree_skb(link->skb); kfree(link); } static void nfcsim_link_recv_wake(struct nfcsim_link *link) { link->cond = 1; wake_up_interruptible(&link->recv_wait); } static void nfcsim_link_set_skb(struct nfcsim_link *link, struct sk_buff *skb, u8 rf_tech, u8 mode) { mutex_lock(&link->lock); dev_kfree_skb(link->skb); link->skb = skb; link->rf_tech = rf_tech; link->mode = mode; mutex_unlock(&link->lock); } static void nfcsim_link_recv_cancel(struct nfcsim_link *link) { mutex_lock(&link->lock); link->mode = NFCSIM_MODE_NONE; mutex_unlock(&link->lock); nfcsim_link_recv_wake(link); } static void nfcsim_link_shutdown(struct nfcsim_link *link) { mutex_lock(&link->lock); link->shutdown = 1; link->mode = NFCSIM_MODE_NONE; mutex_unlock(&link->lock); nfcsim_link_recv_wake(link); } static struct sk_buff *nfcsim_link_recv_skb(struct nfcsim_link *link, int timeout, u8 rf_tech, u8 mode) { int rc; struct sk_buff *skb; rc = wait_event_interruptible_timeout(link->recv_wait, link->cond, msecs_to_jiffies(timeout)); mutex_lock(&link->lock); skb = link->skb; link->skb = NULL; if (!rc) { rc = -ETIMEDOUT; goto done; } if (!skb || link->rf_tech != rf_tech || link->mode == mode) { rc = -EINVAL; goto done; } if (link->shutdown) { rc = -ENODEV; goto done; } done: mutex_unlock(&link->lock); if (rc < 0) { dev_kfree_skb(skb); skb = ERR_PTR(rc); } link->cond = 0; return skb; } static void nfcsim_send_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work); /* * To effectively send data, the device just wake up its link_out which * is the link_in of the peer device. The exchanged skb has already been * stored in the dev->link_out through nfcsim_link_set_skb(). 
*/ nfcsim_link_recv_wake(dev->link_out); } static void nfcsim_recv_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, recv_work); struct sk_buff *skb; skb = nfcsim_link_recv_skb(dev->link_in, dev->recv_timeout, dev->rf_tech, dev->mode); if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); if (!IS_ERR(skb)) dev_kfree_skb(skb); return; } dev->cb(dev->nfc_digital_dev, dev->arg, skb); } static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); u8 delay; if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); return -ENODEV; } dev->recv_timeout = timeout; dev->cb = cb; dev->arg = arg; schedule_work(&dev->recv_work); if (dev->dropframe) { NFCSIM_DBG(dev, "dropping frame (out of %d)\n", dev->dropframe); dev_kfree_skb(skb); dev->dropframe--; return 0; } if (skb) { nfcsim_link_set_skb(dev->link_out, skb, dev->rf_tech, dev->mode); /* Add random delay (between 3 and 10 ms) before sending data */ get_random_bytes(&delay, 1); delay = 3 + (delay & 0x07); schedule_delayed_work(&dev->send_work, msecs_to_jiffies(delay)); } return 0; } static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev) { const struct nfcsim *dev = nfc_digital_get_drvdata(ddev); nfcsim_link_recv_cancel(dev->link_in); } static int nfcsim_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); dev->up = on; return 0; } static int nfcsim_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_INITIATOR; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_TARGET; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, NULL, timeout, cb, arg); } static const struct nfc_digital_ops nfcsim_digital_ops = { .in_configure_hw = nfcsim_in_configure_hw, .in_send_cmd = nfcsim_in_send_cmd, .tg_listen = nfcsim_tg_listen, .tg_configure_hw = nfcsim_tg_configure_hw, .tg_send_cmd = nfcsim_tg_send_cmd, .abort_cmd = nfcsim_abort_cmd, .switch_rf = nfcsim_switch_rf, }; static struct dentry *nfcsim_debugfs_root; static void nfcsim_debugfs_init(void) { nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL); } static void nfcsim_debugfs_remove(void) { debugfs_remove_recursive(nfcsim_debugfs_root); } static void nfcsim_debugfs_init_dev(struct nfcsim *dev) { struct dentry *dev_dir; char devname[5]; /* nfcX\0 */ u32 idx; int n; if (!nfcsim_debugfs_root) { 
NFCSIM_ERR(dev, "nfcsim debugfs not initialized\n"); return; } idx = dev->nfc_digital_dev->nfc_dev->idx; n = snprintf(devname, sizeof(devname), "nfc%d", idx); if (n >= sizeof(devname)) { NFCSIM_ERR(dev, "Could not compute dev name for dev %d\n", idx); return; } dev_dir = debugfs_create_dir(devname, nfcsim_debugfs_root); debugfs_create_u8("dropframe", 0664, dev_dir, &dev->dropframe); } static struct nfcsim *nfcsim_device_new(struct nfcsim_link *link_in, struct nfcsim_link *link_out) { struct nfcsim *dev; int rc; dev = kzalloc(sizeof(struct nfcsim), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); INIT_DELAYED_WORK(&dev->send_work, nfcsim_send_wq); INIT_WORK(&dev->recv_work, nfcsim_recv_wq); dev->nfc_digital_dev = nfc_digital_allocate_device(&nfcsim_digital_ops, NFC_PROTO_NFC_DEP_MASK, NFCSIM_CAPABILITIES, 0, 0); if (!dev->nfc_digital_dev) { kfree(dev); return ERR_PTR(-ENOMEM); } nfc_digital_set_drvdata(dev->nfc_digital_dev, dev); dev->link_in = link_in; dev->link_out = link_out; rc = nfc_digital_register_device(dev->nfc_digital_dev); if (rc) { pr_err("Could not register digital device (%d)\n", rc); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); return ERR_PTR(rc); } nfcsim_debugfs_init_dev(dev); return dev; } static void nfcsim_device_free(struct nfcsim *dev) { nfc_digital_unregister_device(dev->nfc_digital_dev); dev->up = false; nfcsim_link_shutdown(dev->link_in); cancel_delayed_work_sync(&dev->send_work); cancel_work_sync(&dev->recv_work); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); } static struct nfcsim *dev0; static struct nfcsim *dev1; static int __init nfcsim_init(void) { struct nfcsim_link *link0, *link1; int rc; link0 = nfcsim_link_new(); link1 = nfcsim_link_new(); if (!link0 || !link1) { rc = -ENOMEM; goto exit_err; } nfcsim_debugfs_init(); dev0 = nfcsim_device_new(link0, link1); if (IS_ERR(dev0)) { rc = PTR_ERR(dev0); goto exit_err; } dev1 = nfcsim_device_new(link1, link0); if (IS_ERR(dev1)) { nfcsim_device_free(dev0); rc = PTR_ERR(dev1); goto exit_err; } pr_info("nfcsim " NFCSIM_VERSION " initialized\n"); return 0; exit_err: pr_err("Failed to initialize nfcsim driver (%d)\n", rc); if (link0) nfcsim_link_free(link0); if (link1) nfcsim_link_free(link1); return rc; } static void __exit nfcsim_exit(void) { struct nfcsim_link *link0, *link1; link0 = dev0->link_in; link1 = dev0->link_out; nfcsim_device_free(dev0); nfcsim_device_free(dev1); nfcsim_link_free(link0); nfcsim_link_free(link1); nfcsim_debugfs_remove(); } module_init(nfcsim_init); module_exit(nfcsim_exit); MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION); MODULE_VERSION(NFCSIM_VERSION); MODULE_LICENSE("GPL"); |
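/*
 * Illustrative sketch (not part of the driver): nfcsim_init() above wires the
 * two simulated devices symmetrically, dev0 = (in: link0, out: link1) and
 * dev1 = (in: link1, out: link0), so whatever one device places on its
 * link_out is exactly what the peer later reads from its link_in. The
 * hypothetical user-space model below shows only that pairing with plain
 * structs; the names are made up for illustration.
 */
#include <stdio.h>
#include <string.h>

struct sim_link { char frame[64]; };
struct sim_dev  { struct sim_link *in, *out; };

int main(void)
{
	struct sim_link link0 = { "" }, link1 = { "" };
	struct sim_dev dev0 = { &link0, &link1 };	/* mirrors nfcsim_device_new(link0, link1) */
	struct sim_dev dev1 = { &link1, &link0 };	/* mirrors nfcsim_device_new(link1, link0) */

	/* "send" from dev0: store the frame on its outgoing link */
	strcpy(dev0.out->frame, "ATR_REQ");
	/* dev1 "receives" by reading its incoming link, which is the same object */
	printf("dev1 got: %s\n", dev1.in->frame);
	return 0;
}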
// SPDX-License-Identifier: LGPL-2.0+
/* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Eggert (eggert@twinsun.com). */

/*
 * dgb 10/02/98: ripped this from glibc source to help convert timestamps
 *               to unix time
 *     10/04/98: added new table-based lookup after seeing how ugly
 *               the gnu code is
 * blf 09/27/99: ripped out all the old code and inserted new table from
 *               John Brockmeyer (without leap second corrections)
 *               rewrote udf_stamp_to_time and fixed timezone accounting in
 *               udf_time_to_stamp.
 */

/*
 * We don't take into account leap seconds. This may be correct or incorrect.
 * For more NIST information (especially dealing with leap seconds), see:
 * http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm
 */

#include "udfdecl.h"

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>

void udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
{
	u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
	u16 year = le16_to_cpu(src.year);
	uint8_t type = typeAndTimezone >> 12;
	int16_t offset;

	if (type == 1) {
		offset = typeAndTimezone << 4;
		/* sign extend offset */
		offset = (offset >> 4);
		if (offset == -2047) /* unspecified offset */
			offset = 0;
	} else
		offset = 0;

	dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
			src.second);
	dest->tv_sec -= offset * 60;

	/*
	 * Sanitize nanosecond field since reportedly some filesystems are
	 * recorded with bogus sub-second values.
	 */
	if (src.centiseconds < 100 && src.hundredsOfMicroseconds < 100 &&
	    src.microseconds < 100) {
		dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
			src.hundredsOfMicroseconds * 100 + src.microseconds);
	} else {
		dest->tv_nsec = 0;
	}
}

void udf_time_to_disk_stamp(struct timestamp *dest, struct timespec64 ts)
{
	time64_t seconds;
	int16_t offset;
	struct tm tm;

	offset = -sys_tz.tz_minuteswest;

	dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF));

	seconds = ts.tv_sec + offset * 60;
	time64_to_tm(seconds, 0, &tm);
	dest->year = cpu_to_le16(tm.tm_year + 1900);
	dest->month = tm.tm_mon + 1;
	dest->day = tm.tm_mday;
	dest->hour = tm.tm_hour;
	dest->minute = tm.tm_min;
	dest->second = tm.tm_sec;
	dest->centiseconds = ts.tv_nsec / 10000000;
	dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 -
					dest->centiseconds * 10000) / 100;
	dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
			      dest->hundredsOfMicroseconds * 100);
}

/* EOF */
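/*
 * Illustrative sketch (not part of the filesystem code): udf_disk_stamp_to_time()
 * above recovers the timezone offset from the low 12 bits of typeAndTimezone by
 * shifting left then right through a signed 16-bit value, and rebuilds
 * nanoseconds from the centisecond / hundreds-of-microseconds / microsecond
 * fields. The hypothetical stand-alone example below reproduces just that
 * arithmetic so the worked numbers are easy to verify.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Encode an offset of -60 minutes the way udf_time_to_disk_stamp() does */
	int16_t offset = -60;
	uint16_t typeAndTimezone = 0x1000 | (offset & 0x0FFF);

	/* Decode: the low 12 bits are sign-extended via a 16-bit shift pair */
	int16_t decoded = (int16_t)(typeAndTimezone << 4) >> 4;
	printf("decoded offset: %d minutes\n", decoded);	/* prints -60 */

	/* Sub-second fields: 12 cs + 34 * 100us + 56 us -> 123456 us */
	long nsec = 1000L * (12 * 10000 + 34 * 100 + 56);
	printf("tv_nsec: %ld\n", nsec);				/* prints 123456000 */
	return 0;
}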
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "bat_iv_ogm.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "bitarray.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);

/**
 * enum batadv_dup_status - duplicate status
 */
enum batadv_dup_status {
	/** @BATADV_NO_DUP: the packet is no duplicate */
	BATADV_NO_DUP = 0,

	/**
	 * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
	 * the neighbor)
	 */
	BATADV_ORIG_DUP,

	/** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
	BATADV_NEIGH_DUP,

	/**
	 * @BATADV_PROTECTED: originator is currently protected (after reboot)
	 */
	BATADV_PROTECTED,
};

/**
 * batadv_ring_buffer_set() - update the ring buffer with the given value
 * @lq_recv: pointer to the ring buffer
 * @lq_index: index to store the value at
 * @value: value to store in the ring buffer
 */
static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
}

/**
 *
batadv_ring_buffer_avg() - compute the average of all non-zero values stored * in the given ring buffer * @lq_recv: pointer to the ring buffer * * Return: computed average value. */ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) { const u8 *ptr; u16 count = 0; u16 i = 0; u16 sum = 0; ptr = lq_recv; while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) { if (*ptr != 0) { count++; sum += *ptr; } i++; ptr++; } if (count == 0) return 0; return (u8)(sum / count); } /** * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an * originator * @bat_priv: the bat priv with all the mesh interface information * @addr: mac address of the originator * * Return: the originator object corresponding to the passed mac address or NULL * on failure. * If the object does not exist, it is created and initialised. */ static struct batadv_orig_node * batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) { struct batadv_orig_node *orig_node; int hash_added; orig_node = batadv_orig_hash_find(bat_priv, addr); if (orig_node) return orig_node; orig_node = batadv_orig_node_new(bat_priv, addr); if (!orig_node) return NULL; spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock); kref_get(&orig_node->refcount); hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) goto free_orig_node_hash; return orig_node; free_orig_node_hash: /* reference for batadv_hash_add */ batadv_orig_node_put(orig_node); /* reference from batadv_orig_node_new */ batadv_orig_node_put(orig_node); return NULL; } static struct batadv_neigh_node * batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, const u8 *neigh_addr, struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_neigh) { struct batadv_neigh_node *neigh_node; neigh_node = batadv_neigh_node_get_or_create(orig_node, hard_iface, neigh_addr); if (!neigh_node) goto out; neigh_node->orig_node = orig_neigh; out: return neigh_node; } static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) { struct batadv_ogm_packet *batadv_ogm_packet; unsigned char *ogm_buff; u32 random_seqno; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno); hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN; ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC); if (!ogm_buff) { mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); return -ENOMEM; } hard_iface->bat_iv.ogm_buff = ogm_buff; batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; batadv_ogm_packet->packet_type = BATADV_IV_OGM; batadv_ogm_packet->version = BATADV_COMPAT_VERSION; batadv_ogm_packet->ttl = 2; batadv_ogm_packet->flags = BATADV_NO_FLAGS; batadv_ogm_packet->reserved = 0; batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); return 0; } static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface) { mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); kfree(hard_iface->bat_iv.ogm_buff); hard_iface->bat_iv.ogm_buff = NULL; mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface) { struct batadv_ogm_packet *batadv_ogm_packet; void *ogm_buff; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ogm_buff = hard_iface->bat_iv.ogm_buff; if (!ogm_buff) goto unlock; batadv_ogm_packet = ogm_buff; ether_addr_copy(batadv_ogm_packet->orig, 
hard_iface->net_dev->dev_addr); ether_addr_copy(batadv_ogm_packet->prev_sender, hard_iface->net_dev->dev_addr); unlock: mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } static void batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface) { struct batadv_ogm_packet *batadv_ogm_packet; void *ogm_buff; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ogm_buff = hard_iface->bat_iv.ogm_buff; if (!ogm_buff) goto unlock; batadv_ogm_packet = ogm_buff; batadv_ogm_packet->ttl = BATADV_TTL; unlock: mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } /* when do we schedule our own ogm to be sent */ static unsigned long batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv) { unsigned int msecs; msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER; msecs += get_random_u32_below(2 * BATADV_JITTER); return jiffies + msecs_to_jiffies(msecs); } /* when do we schedule a ogm packet to be sent */ static unsigned long batadv_iv_ogm_fwd_send_time(void) { return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2)); } /* apply hop penalty for a normal link */ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv) { int hop_penalty = atomic_read(&bat_priv->hop_penalty); int new_tq; new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty); new_tq /= BATADV_TQ_MAX_VALUE; return new_tq; } /** * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached * @buff_pos: current position in the skb * @packet_len: total length of the skb * @ogm_packet: potential OGM in buffer * * Return: true if there is enough space for another OGM, false otherwise. */ static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, const struct batadv_ogm_packet *ogm_packet) { int next_buff_pos = 0; /* check if there is enough space for the header */ next_buff_pos += buff_pos + sizeof(*ogm_packet); if (next_buff_pos > packet_len) return false; /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm_packet->tvlv_len); return next_buff_pos <= packet_len; } /* send a batman ogm to a given interface */ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); const char *fwd_str; u8 packet_num; s16 buff_pos; struct batadv_ogm_packet *batadv_ogm_packet; struct sk_buff *skb; u8 *packet_pos; if (hard_iface->if_status != BATADV_IF_ACTIVE) return; packet_num = 0; buff_pos = 0; packet_pos = forw_packet->skb->data; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; /* adjust all flags and log packets */ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, batadv_ogm_packet)) { /* we might have aggregated direct link packets with an * ordinary base packet */ if (test_bit(packet_num, forw_packet->direct_link_flags) && forw_packet->if_incoming == hard_iface) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK; if (packet_num > 0 || !forw_packet->own) fwd_str = "Forwarding"; else fwd_str = "Sending own"; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? 
"aggregated " : ""), batadv_ogm_packet->orig, ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, batadv_ogm_packet->ttl, str_on_off(batadv_ogm_packet->flags & BATADV_DIRECTLINK), hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); buff_pos += BATADV_OGM_HLEN; buff_pos += ntohs(batadv_ogm_packet->tvlv_len); packet_num++; packet_pos = forw_packet->skb->data + buff_pos; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; } /* create clone because function is called more than once */ skb = skb_clone(forw_packet->skb, GFP_ATOMIC); if (skb) { batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, skb->len + ETH_HLEN); batadv_send_broadcast_skb(skb, hard_iface); } } /* send a batman ogm packet */ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) { struct net_device *mesh_iface; if (!forw_packet->if_incoming) { pr_err("Error - can't forward packet: incoming iface not specified\n"); return; } mesh_iface = forw_packet->if_incoming->mesh_iface; if (WARN_ON(!forw_packet->if_outgoing)) return; if (forw_packet->if_outgoing->mesh_iface != mesh_iface) { pr_warn("%s: mesh interface switch for queued OGM\n", __func__); return; } if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE) return; /* only for one specific outgoing interface */ batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing); } /** * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an * existing forward packet * @new_bat_ogm_packet: OGM packet to be aggregated * @bat_priv: the bat priv with all the mesh interface information * @packet_len: (total) length of the OGM * @send_time: timestamp (jiffies) when the packet is to be sent * @directlink: true if this is a direct link packet * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @forw_packet: the forwarded packet which should be checked * * Return: true if new_packet can be aggregated with forw_packet */ static bool batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, struct batadv_priv *bat_priv, int packet_len, unsigned long send_time, bool directlink, const struct batadv_hard_iface *if_incoming, const struct batadv_hard_iface *if_outgoing, const struct batadv_forw_packet *forw_packet) { struct batadv_ogm_packet *batadv_ogm_packet; unsigned int aggregated_bytes = forw_packet->packet_len + packet_len; struct batadv_hard_iface *primary_if = NULL; u8 packet_num = forw_packet->num_packets; bool res = false; unsigned long aggregation_end_time; unsigned int max_bytes; batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; aggregation_end_time = send_time; aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); max_bytes = min_t(unsigned int, if_outgoing->net_dev->mtu, BATADV_MAX_AGGREGATION_BYTES); /* we can aggregate the current packet to this aggregated packet * if: * * - the send time is within our MAX_AGGREGATION_MS time * - the resulting packet won't be bigger than * MAX_AGGREGATION_BYTES and MTU of the outgoing interface * - the number of packets is lower than MAX_AGGREGATION_PACKETS * otherwise aggregation is not possible */ if (!time_before(send_time, forw_packet->send_time) || !time_after_eq(aggregation_end_time, forw_packet->send_time)) return false; if (aggregated_bytes > max_bytes) return false; if (packet_num >= BATADV_MAX_AGGREGATION_PACKETS) return false; /* packet is not leaving on the same interface. 
*/ if (forw_packet->if_outgoing != if_outgoing) return false; /* check aggregation compatibility * -> direct link packets are broadcasted on * their interface only * -> aggregate packet if the current packet is * a "global" packet as well as the base * packet */ primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return false; /* packets without direct link flag and high TTL * are flooded through the net */ if (!directlink && !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) && batadv_ogm_packet->ttl != 1 && /* own packets originating non-primary * interfaces leave only that interface */ (!forw_packet->own || forw_packet->if_incoming == primary_if)) { res = true; goto out; } /* if the incoming packet is sent via this one * interface only - we still can aggregate */ if (directlink && new_bat_ogm_packet->ttl == 1 && forw_packet->if_incoming == if_incoming && /* packets from direct neighbors or * own secondary interface packets * (= secondary interface packets in general) */ (batadv_ogm_packet->flags & BATADV_DIRECTLINK || (forw_packet->own && forw_packet->if_incoming != primary_if))) { res = true; goto out; } out: batadv_hardif_put(primary_if); return res; } /** * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this * packet to it. * @packet_buff: pointer to the OGM * @packet_len: (total) length of the OGM * @send_time: timestamp (jiffies) when the packet is to be sent * @direct_link: whether this OGM has direct link status * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @own_packet: true if it is a self-generated ogm */ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, int packet_len, unsigned long send_time, bool direct_link, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, int own_packet) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_forw_packet *forw_packet_aggr; struct sk_buff *skb; unsigned char *skb_buff; unsigned int skb_size; atomic_t *queue_left = own_packet ? 
NULL : &bat_priv->batman_queue_left; if (atomic_read(&bat_priv->aggregated_ogms)) skb_size = max_t(unsigned int, BATADV_MAX_AGGREGATION_BYTES, packet_len); else skb_size = packet_len; skb_size += ETH_HLEN; skb = netdev_alloc_skb_ip_align(NULL, skb_size); if (!skb) return; forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing, queue_left, bat_priv, skb); if (!forw_packet_aggr) { kfree_skb(skb); return; } forw_packet_aggr->skb->priority = TC_PRIO_CONTROL; skb_reserve(forw_packet_aggr->skb, ETH_HLEN); skb_buff = skb_put(forw_packet_aggr->skb, packet_len); forw_packet_aggr->packet_len = packet_len; memcpy(skb_buff, packet_buff, packet_len); forw_packet_aggr->own = own_packet; bitmap_zero(forw_packet_aggr->direct_link_flags, BATADV_MAX_AGGREGATION_PACKETS); forw_packet_aggr->send_time = send_time; /* save packet direct link flag status */ if (direct_link) set_bit(0, forw_packet_aggr->direct_link_flags); INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, batadv_iv_send_outstanding_bat_ogm_packet); batadv_forw_packet_ogmv1_queue(bat_priv, forw_packet_aggr, send_time); } /* aggregate a new packet into the existing ogm packet */ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr, const unsigned char *packet_buff, int packet_len, bool direct_link) { skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len); forw_packet_aggr->packet_len += packet_len; /* save packet direct link flag status */ if (direct_link) set_bit(forw_packet_aggr->num_packets, forw_packet_aggr->direct_link_flags); forw_packet_aggr->num_packets++; } /** * batadv_iv_ogm_queue_add() - queue up an OGM for transmission * @bat_priv: the bat priv with all the mesh interface information * @packet_buff: pointer to the OGM * @packet_len: (total) length of the OGM * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @own_packet: true if it is a self-generated ogm * @send_time: timestamp (jiffies) when the packet is to be sent */ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, unsigned char *packet_buff, int packet_len, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, int own_packet, unsigned long send_time) { /* _aggr -> pointer to the packet we want to aggregate with * _pos -> pointer to the position in the queue */ struct batadv_forw_packet *forw_packet_aggr = NULL; struct batadv_forw_packet *forw_packet_pos = NULL; struct batadv_ogm_packet *batadv_ogm_packet; bool direct_link; unsigned long max_aggregation_jiffies; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK); max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); /* find position for the packet in the forward queue */ spin_lock_bh(&bat_priv->forw_bat_list_lock); /* own packets are not to be aggregated */ if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) { hlist_for_each_entry(forw_packet_pos, &bat_priv->forw_bat_list, list) { if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet, bat_priv, packet_len, send_time, direct_link, if_incoming, if_outgoing, forw_packet_pos)) { forw_packet_aggr = forw_packet_pos; break; } } } /* nothing to aggregate with - either aggregation disabled or no * suitable aggregation packet found */ if (!forw_packet_aggr) { /* the following section can run without the lock */ spin_unlock_bh(&bat_priv->forw_bat_list_lock); /* if we could not aggregate this packet with one of the others * we hold 
it back for a while, so that it might be aggregated * later on */ if (!own_packet && atomic_read(&bat_priv->aggregated_ogms)) send_time += max_aggregation_jiffies; batadv_iv_ogm_aggregate_new(packet_buff, packet_len, send_time, direct_link, if_incoming, if_outgoing, own_packet); } else { batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len, direct_link); spin_unlock_bh(&bat_priv->forw_bat_list_lock); } } static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, const struct ethhdr *ethhdr, struct batadv_ogm_packet *batadv_ogm_packet, bool is_single_hop_neigh, bool is_from_best_next_hop, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); u16 tvlv_len; if (batadv_ogm_packet->ttl <= 1) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); return; } if (!is_from_best_next_hop) { /* Mark the forwarded packet when it is not coming from our * best next hop. We still need to forward the packet for our * neighbor link quality detection to work in case the packet * originated from a single hop neighbor. Otherwise we can * simply drop the ogm. */ if (is_single_hop_neigh) batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP; else return; } tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); batadv_ogm_packet->ttl--; ether_addr_copy(batadv_ogm_packet->prev_sender, ethhdr->h_source); /* apply hop penalty */ batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq, bat_priv); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding packet: tq: %i, ttl: %i\n", batadv_ogm_packet->tq, batadv_ogm_packet->ttl); if (is_single_hop_neigh) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK; batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet, BATADV_OGM_HLEN + tvlv_len, if_incoming, if_outgoing, 0, batadv_iv_ogm_fwd_send_time()); } /** * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows * for the given interface * @hard_iface: the interface for which the windows have to be shifted */ static void batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo; unsigned long *word; u32 i; u8 *w; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) { if (orig_ifinfo->if_outgoing != hard_iface) continue; spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); word = orig_ifinfo->bat_iv.bcast_own; batadv_bit_get_packet(bat_priv, word, 1, 0); w = &orig_ifinfo->bat_iv.bcast_own_sum; *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); } } rcu_read_unlock(); } } /** * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer * @hard_iface: interface whose ogm buffer should be transmitted */ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff; struct batadv_ogm_packet *batadv_ogm_packet; struct batadv_hard_iface *primary_if, *tmp_hard_iface; int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len; u32 seqno; u16 tvlv_len = 0; 
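/* tvlv_len stays 0 unless this is the primary interface: TVLV containers are only appended to OGMs scheduled via the primary interface (see batadv_tvlv_container_ogm_append() below). */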
unsigned long send_time; lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex); /* interface already disabled by batadv_iv_ogm_iface_disable */ if (!*ogm_buff) return; /* the interface gets activated here to avoid race conditions between * the moment of activating the interface in * hardif_activate_interface() where the originator mac is set and * outdated packets (especially uninitialized mac addresses) in the * packet queue */ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED) hard_iface->if_status = BATADV_IF_ACTIVE; primary_if = batadv_primary_if_get_selected(bat_priv); if (hard_iface == primary_if) { /* tt changes have to be committed before the tvlv data is * appended as it may alter the tt tvlv container */ batadv_tt_local_commit_changes(bat_priv); tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff, ogm_buff_len, BATADV_OGM_HLEN); } batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff); batadv_ogm_packet->tvlv_len = htons(tvlv_len); /* change sequence number to network order */ seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno); batadv_ogm_packet->seqno = htonl(seqno); atomic_inc(&hard_iface->bat_iv.ogm_seqno); batadv_iv_ogm_slide_own_bcast_window(hard_iface); send_time = batadv_iv_ogm_emit_send_time(bat_priv); if (hard_iface != primary_if) { /* OGMs from secondary interfaces are only scheduled on their * respective interfaces. */ batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len, hard_iface, hard_iface, 1, send_time); goto out; } /* OGMs from primary interfaces are scheduled on all * interfaces. */ rcu_read_lock(); list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) { if (tmp_hard_iface->mesh_iface != hard_iface->mesh_iface) continue; if (!kref_get_unless_zero(&tmp_hard_iface->refcount)) continue; batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len, hard_iface, tmp_hard_iface, 1, send_time); batadv_hardif_put(tmp_hard_iface); } rcu_read_unlock(); out: batadv_hardif_put(primary_if); } static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) { if (hard_iface->if_status == BATADV_IF_NOT_IN_USE || hard_iface->if_status == BATADV_IF_TO_BE_REMOVED) return; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); batadv_iv_ogm_schedule_buff(hard_iface); mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } /** * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface * @orig_node: originator which reproadcasted the OGMs directly * @if_outgoing: interface which transmitted the original OGM and received the * direct rebroadcast * * Return: Number of replied (rebroadcasted) OGMs which were transmitted by * an originator and directly (without intermediate hop) received by a specific * interface */ static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing) { struct batadv_orig_ifinfo *orig_ifinfo; u8 sum; orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing); if (!orig_ifinfo) return 0; spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); sum = orig_ifinfo->bat_iv.bcast_own_sum; spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_ifinfo_put(orig_ifinfo); return sum; } /** * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an * originator * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the orig node who originally emitted the ogm packet * @orig_ifinfo: ifinfo for the outgoing interface of the orig_node * @ethhdr: Ethernet header of the OGM * @batadv_ogm_packet: the ogm packet * @if_incoming: 
interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @dup_status: the duplicate status of this ogm packet. */ static void batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_orig_ifinfo *orig_ifinfo, const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, enum batadv_dup_status dup_status) { struct batadv_neigh_ifinfo *neigh_ifinfo = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *neigh_node = NULL; struct batadv_neigh_node *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; u8 sum_orig, sum_neigh; u8 *neigh_addr; u8 tq_avg; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s(): Searching and updating originator entry of received packet\n", __func__); rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { neigh_addr = tmp_neigh_node->addr; if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && tmp_neigh_node->if_incoming == if_incoming && kref_get_unless_zero(&tmp_neigh_node->refcount)) { if (WARN(neigh_node, "too many matching neigh_nodes")) batadv_neigh_node_put(neigh_node); neigh_node = tmp_neigh_node; continue; } if (dup_status != BATADV_NO_DUP) continue; /* only update the entry for this outgoing interface */ neigh_ifinfo = batadv_neigh_ifinfo_get(tmp_neigh_node, if_outgoing); if (!neigh_ifinfo) continue; spin_lock_bh(&tmp_neigh_node->ifinfo_lock); batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv, &neigh_ifinfo->bat_iv.tq_index, 0); tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv); neigh_ifinfo->bat_iv.tq_avg = tq_avg; spin_unlock_bh(&tmp_neigh_node->ifinfo_lock); batadv_neigh_ifinfo_put(neigh_ifinfo); neigh_ifinfo = NULL; } if (!neigh_node) { struct batadv_orig_node *orig_tmp; orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_tmp) goto unlock; neigh_node = batadv_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, orig_node, orig_tmp); batadv_orig_node_put(orig_tmp); if (!neigh_node) goto unlock; } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Updating existing last-hop neighbor of originator\n"); } rcu_read_unlock(); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (!neigh_ifinfo) goto out; neigh_node->last_seen = jiffies; spin_lock_bh(&neigh_node->ifinfo_lock); batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv, &neigh_ifinfo->bat_iv.tq_index, batadv_ogm_packet->tq); tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv); neigh_ifinfo->bat_iv.tq_avg = tq_avg; spin_unlock_bh(&neigh_node->ifinfo_lock); if (dup_status == BATADV_NO_DUP) { orig_ifinfo->last_ttl = batadv_ogm_packet->ttl; neigh_ifinfo->last_ttl = batadv_ogm_packet->ttl; } /* if this neighbor already is our next hop there is nothing * to change */ router = batadv_orig_router_get(orig_node, if_outgoing); if (router == neigh_node) goto out; if (router) { router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); if (!router_ifinfo) goto out; /* if this neighbor does not offer a better TQ we won't * consider it */ if (router_ifinfo->bat_iv.tq_avg > neigh_ifinfo->bat_iv.tq_avg) goto out; } /* if the TQ is the same and the link not more symmetric we * won't consider it either */ if (router_ifinfo && neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) { sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node, router->if_incoming); sum_neigh = 
batadv_iv_orig_ifinfo_sum(neigh_node->orig_node, neigh_node->if_incoming); if (sum_orig >= sum_neigh) goto out; } batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node); goto out; unlock: rcu_read_unlock(); out: batadv_neigh_node_put(neigh_node); batadv_neigh_node_put(router); batadv_neigh_ifinfo_put(neigh_ifinfo); batadv_neigh_ifinfo_put(router_ifinfo); } /** * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet * @orig_node: the orig node who originally emitted the ogm packet * @orig_neigh_node: the orig node struct of the neighbor who sent the packet * @batadv_ogm_packet: the ogm packet * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * * Return: true if the link can be considered bidirectional, false otherwise */ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_neigh_node, struct batadv_ogm_packet *batadv_ogm_packet, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; bool ret = false; /* find corresponding one hop neighbor */ rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_neigh_node->neigh_list, list) { if (!batadv_compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) continue; if (tmp_neigh_node->if_incoming != if_incoming) continue; if (!kref_get_unless_zero(&tmp_neigh_node->refcount)) continue; neigh_node = tmp_neigh_node; break; } rcu_read_unlock(); if (!neigh_node) neigh_node = batadv_iv_ogm_neigh_new(if_incoming, orig_neigh_node->orig, orig_neigh_node, orig_neigh_node); if (!neigh_node) goto out; /* if orig_node is direct neighbor update neigh_node last_seen */ if (orig_node == orig_neigh_node) neigh_node->last_seen = jiffies; orig_node->last_seen = jiffies; /* find packet count of corresponding one hop neighbor */ orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (neigh_ifinfo) { neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count; batadv_neigh_ifinfo_put(neigh_ifinfo); } else { neigh_rq_count = 0; } /* pay attention to not get a value bigger than 100 % */ if (orig_eq_count > neigh_rq_count) total_count = neigh_rq_count; else total_count = orig_eq_count; /* if we have too few packets (too less data) we set tq_own to zero * if we receive too few packets it is not considered bidirectional */ if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM || neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM) tq_own = 0; else /* neigh_node->real_packet_count is never zero as we * only purge old information when getting new * information */ tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count; /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does * affect the nearly-symmetric links only a little, but * punishes asymmetric links more. 
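* For example, assuming the usual BATADV_TQ_LOCAL_WINDOW_SIZE of 64 and BATADV_TQ_MAX_VALUE of 255: neigh_rq_count = 32 gives inv_asym_penalty = 255 * (32**3) / (64**3) = 31, i.e. tq_asym_penalty = 224, while neigh_rq_count = 60 gives inv_asym_penalty = 0, i.e. tq_asym_penalty = 255.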
This will give a value * between 0 and TQ_MAX_VALUE */ neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count; neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv; neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube; inv_asym_penalty /= neigh_rq_max_cube; tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty; tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty); /* penalize if the OGM is forwarded on the same interface. WiFi * interfaces and other half duplex devices suffer from throughput * drops as they can't send and receive at the same time. */ if (if_outgoing && if_incoming == if_outgoing && batadv_is_wifi_hardif(if_outgoing)) tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty, bat_priv); combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty * tq_iface_hop_penalty; combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE; batadv_ogm_packet->tq = combined_tq; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", orig_node->orig, orig_neigh_node->orig, total_count, neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_hop_penalty, batadv_ogm_packet->tq, if_incoming->net_dev->name, if_outgoing ? if_outgoing->net_dev->name : "DEFAULT"); /* if link has the minimum required transmission quality * consider it bidirectional */ if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT) ret = true; out: batadv_neigh_node_put(neigh_node); return ret; } /** * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces, * adjust the sequence number and find out whether it is a duplicate * @ethhdr: ethernet header of the packet * @batadv_ogm_packet: OGM packet to be considered * @if_incoming: interface on which the OGM packet was received * @if_outgoing: interface for which the retransmission should be considered * * Return: duplicate status as enum batadv_dup_status */ static enum batadv_dup_status batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, const struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo = NULL; struct batadv_neigh_node *neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; bool is_dup; s32 seq_diff; bool need_update = false; int set_mark; enum batadv_dup_status ret = BATADV_NO_DUP; u32 seqno = ntohl(batadv_ogm_packet->seqno); u8 *neigh_addr; u8 packet_count; unsigned long *bitmap; orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig); if (!orig_node) return BATADV_NO_DUP; orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); if (WARN_ON(!orig_ifinfo)) { batadv_orig_node_put(orig_node); return 0; } spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); seq_diff = seqno - orig_ifinfo->last_real_seqno; /* signalize caller that the packet is to be dropped. 
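* That is the case when the sequence number difference falls outside the expected window while the originator is still within its protection time (typically right after it rebooted), as checked by batadv_window_protected().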
*/ if (!hlist_empty(&orig_node->neigh_list) && batadv_window_protected(bat_priv, seq_diff, BATADV_TQ_LOCAL_WINDOW_SIZE, &orig_ifinfo->batman_seqno_reset, NULL)) { ret = BATADV_PROTECTED; goto out; } rcu_read_lock(); hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) { neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (!neigh_ifinfo) continue; neigh_addr = neigh_node->addr; is_dup = batadv_test_bit(neigh_ifinfo->bat_iv.real_bits, orig_ifinfo->last_real_seqno, seqno); if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && neigh_node->if_incoming == if_incoming) { set_mark = 1; if (is_dup) ret = BATADV_NEIGH_DUP; } else { set_mark = 0; if (is_dup && ret != BATADV_NEIGH_DUP) ret = BATADV_ORIG_DUP; } /* if the window moved, set the update flag. */ bitmap = neigh_ifinfo->bat_iv.real_bits; need_update |= batadv_bit_get_packet(bat_priv, bitmap, seq_diff, set_mark); packet_count = bitmap_weight(bitmap, BATADV_TQ_LOCAL_WINDOW_SIZE); neigh_ifinfo->bat_iv.real_packet_count = packet_count; batadv_neigh_ifinfo_put(neigh_ifinfo); } rcu_read_unlock(); if (need_update) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s updating last_seqno: old %u, new %u\n", if_outgoing ? if_outgoing->net_dev->name : "DEFAULT", orig_ifinfo->last_real_seqno, seqno); orig_ifinfo->last_real_seqno = seqno; } out: spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_node_put(orig_node); batadv_orig_ifinfo_put(orig_ifinfo); return ret; } /** * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing * interface * @skb: the skb containing the OGM * @ogm_offset: offset from skb->data to start of ogm header * @orig_node: the (cached) orig node for the originator of this OGM * @if_incoming: the interface where this packet was received * @if_outgoing: the interface for which the packet should be considered */ static void batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset, struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_hardif_neigh_node *hardif_neigh = NULL; struct batadv_neigh_node *router = NULL; struct batadv_neigh_node *router_router = NULL; struct batadv_orig_node *orig_neigh_node; struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_node *orig_neigh_router = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_ogm_packet *ogm_packet; enum batadv_dup_status dup_status; bool is_from_best_next_hop = false; bool is_single_hop_neigh = false; bool sameseq, similar_ttl; struct sk_buff *skb_priv; struct ethhdr *ethhdr; u8 *prev_sender; bool is_bidirect; /* create a private copy of the skb, as some functions change tq value * and/or flags. 
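* batadv_iv_ogm_process() hands the same skb to this function once per outgoing interface, so every pass works on its own copy and the caller's buffer stays untouched.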
*/ skb_priv = skb_copy(skb, GFP_ATOMIC); if (!skb_priv) return; ethhdr = eth_hdr(skb_priv); ogm_packet = (struct batadv_ogm_packet *)(skb_priv->data + ogm_offset); dup_status = batadv_iv_ogm_update_seqnos(ethhdr, ogm_packet, if_incoming, if_outgoing); if (batadv_compare_eth(ethhdr->h_source, ogm_packet->orig)) is_single_hop_neigh = true; if (dup_status == BATADV_PROTECTED) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: packet within seqno protection time (sender: %pM)\n", ethhdr->h_source); goto out; } if (ogm_packet->tq == 0) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet with tq equal 0\n"); goto out; } if (is_single_hop_neigh) { hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source); if (hardif_neigh) hardif_neigh->last_seen = jiffies; } router = batadv_orig_router_get(orig_node, if_outgoing); if (router) { router_router = batadv_orig_router_get(router->orig_node, if_outgoing); router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); } if ((router_ifinfo && router_ifinfo->bat_iv.tq_avg != 0) && (batadv_compare_eth(router->addr, ethhdr->h_source))) is_from_best_next_hop = true; prev_sender = ogm_packet->prev_sender; /* avoid temporary routing loops */ if (router && router_router && (batadv_compare_eth(router->addr, prev_sender)) && !(batadv_compare_eth(ogm_packet->orig, prev_sender)) && (batadv_compare_eth(router->addr, router_router->addr))) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n", ethhdr->h_source); goto out; } if (if_outgoing == BATADV_IF_DEFAULT) batadv_tvlv_ogm_receive(bat_priv, ogm_packet, orig_node); /* if sender is a direct neighbor the sender mac equals * originator mac */ if (is_single_hop_neigh) orig_neigh_node = orig_node; else orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) goto out; /* Update nc_nodes of the originator */ batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node, ogm_packet, is_single_hop_neigh); orig_neigh_router = batadv_orig_router_get(orig_neigh_node, if_outgoing); /* drop packet if sender is not a direct neighbor and if we * don't route towards it */ if (!is_single_hop_neigh && !orig_neigh_router) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: OGM via unknown neighbor!\n"); goto out_neigh; } is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node, ogm_packet, if_incoming, if_outgoing); /* update ranking if it is not a duplicate or has the same * seqno and similar ttl as the non-duplicate */ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); if (!orig_ifinfo) goto out_neigh; sameseq = orig_ifinfo->last_real_seqno == ntohl(ogm_packet->seqno); similar_ttl = (orig_ifinfo->last_ttl - 3) <= ogm_packet->ttl; if (is_bidirect && (dup_status == BATADV_NO_DUP || (sameseq && similar_ttl))) { batadv_iv_ogm_orig_update(bat_priv, orig_node, orig_ifinfo, ethhdr, ogm_packet, if_incoming, if_outgoing, dup_status); } batadv_orig_ifinfo_put(orig_ifinfo); /* only forward for specific interface, not for the default one. 
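* The BATADV_IF_DEFAULT pass only updates the routing state; the actual rebroadcasts are queued by the passes for the real outgoing interfaces.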
*/ if (if_outgoing == BATADV_IF_DEFAULT) goto out_neigh; /* is single hop (direct) neighbor */ if (is_single_hop_neigh) { /* OGMs from secondary interfaces should only scheduled once * per interface where it has been received, not multiple times */ if (ogm_packet->ttl <= 2 && if_incoming != if_outgoing) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: OGM from secondary interface and wrong outgoing interface\n"); goto out_neigh; } /* mark direct link on incoming interface */ batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet, is_single_hop_neigh, is_from_best_next_hop, if_incoming, if_outgoing); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); goto out_neigh; } /* multihop originator */ if (!is_bidirect) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: not received via bidirectional link\n"); goto out_neigh; } if (dup_status == BATADV_NEIGH_DUP) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: duplicate packet received\n"); goto out_neigh; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast originator packet\n"); batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet, is_single_hop_neigh, is_from_best_next_hop, if_incoming, if_outgoing); out_neigh: if (orig_neigh_node && !is_single_hop_neigh) batadv_orig_node_put(orig_neigh_node); out: batadv_neigh_ifinfo_put(router_ifinfo); batadv_neigh_node_put(router); batadv_neigh_node_put(router_router); batadv_neigh_node_put(orig_neigh_router); batadv_hardif_neigh_put(hardif_neigh); consume_skb(skb_priv); } /** * batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it * @ogm_packet: rebroadcast OGM packet to process * @if_incoming: the interface where this packet was received * @orig_node: originator which reproadcasted the OGMs * @if_incoming_seqno: OGM sequence number when rebroadcast was received */ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, struct batadv_hard_iface *if_incoming, struct batadv_orig_node *orig_node, u32 if_incoming_seqno) { struct batadv_orig_ifinfo *orig_ifinfo; s32 bit_pos; u8 *weight; /* neighbor has to indicate direct link and it has to * come via the corresponding interface */ if (!(ogm_packet->flags & BATADV_DIRECTLINK)) return; if (!batadv_compare_eth(if_incoming->net_dev->dev_addr, ogm_packet->orig)) return; orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming); if (!orig_ifinfo) return; /* save packet seqno for bidirectional check */ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); bit_pos = if_incoming_seqno - 2; bit_pos -= ntohl(ogm_packet->seqno); batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos); weight = &orig_ifinfo->bat_iv.bcast_own_sum; *weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own, BATADV_TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_ifinfo_put(orig_ifinfo); } /** * batadv_iv_ogm_process() - process an incoming batman iv OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) * @if_incoming: the interface where this packet was received */ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_orig_node *orig_neigh_node, *orig_node; struct batadv_hard_iface *hard_iface; struct batadv_ogm_packet *ogm_packet; u32 if_incoming_seqno; bool has_directlink_flag; struct ethhdr *ethhdr; bool is_my_oldorig = 
false; bool is_my_addr = false; bool is_my_orig = false; ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset); ethhdr = eth_hdr(skb); /* Silently drop when the batman packet is actually not a * correct packet. * * This might happen if a packet is padded (e.g. Ethernet has a * minimum frame length of 64 byte) and the aggregation interprets * it as an additional length. * * TODO: A more sane solution would be to have a bit in the * batadv_ogm_packet to detect whether the packet is the last * packet in an aggregation. Here we expect that the padding * is always zero (or not 0x01) */ if (ogm_packet->packet_type != BATADV_IV_OGM) return; /* could be changed by schedule_own_packet() */ if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno); if (ogm_packet->flags & BATADV_DIRECTLINK) has_directlink_flag = true; else has_directlink_flag = false; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, ogm_packet->orig, ogm_packet->prev_sender, ntohl(ogm_packet->seqno), ogm_packet->tq, ogm_packet->ttl, ogm_packet->version, has_directlink_flag); rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; if (hard_iface->mesh_iface != if_incoming->mesh_iface) continue; if (batadv_compare_eth(ethhdr->h_source, hard_iface->net_dev->dev_addr)) is_my_addr = true; if (batadv_compare_eth(ogm_packet->orig, hard_iface->net_dev->dev_addr)) is_my_orig = true; if (batadv_compare_eth(ogm_packet->prev_sender, hard_iface->net_dev->dev_addr)) is_my_oldorig = true; } rcu_read_unlock(); if (is_my_addr) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: received my own broadcast (sender: %pM)\n", ethhdr->h_source); return; } if (is_my_orig) { orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) return; batadv_iv_ogm_process_reply(ogm_packet, if_incoming, orig_neigh_node, if_incoming_seqno); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet from myself (via neighbor)\n"); batadv_orig_node_put(orig_neigh_node); return; } if (is_my_oldorig) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n", ethhdr->h_source); return; } if (ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", ethhdr->h_source); return; } orig_node = batadv_iv_ogm_orig_get(bat_priv, ogm_packet->orig); if (!orig_node) return; batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node, if_incoming, BATADV_IF_DEFAULT); rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) continue; batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node, if_incoming, hard_iface); batadv_hardif_put(hard_iface); } rcu_read_unlock(); batadv_orig_node_put(orig_node); } static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_forw_packet *forw_packet; struct batadv_priv *bat_priv; bool dropped = false; delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct 
batadv_forw_packet, delayed_work); bat_priv = netdev_priv(forw_packet->if_incoming->mesh_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) { dropped = true; goto out; } batadv_iv_ogm_emit(forw_packet); /* we have to have at least one packet in the queue to determine the * queues wake up time unless we are shutting down. * * only re-schedule if this is the "original" copy, e.g. the OGM of the * primary interface should only be rescheduled once per period, but * this function will be called for the forw_packet instances of the * other secondary interfaces as well. */ if (forw_packet->own && forw_packet->if_incoming == forw_packet->if_outgoing) batadv_iv_ogm_schedule(forw_packet->if_incoming); out: /* do we get something for free()? */ if (batadv_forw_packet_steal(forw_packet, &bat_priv->forw_bat_list_lock)) batadv_forw_packet_free(forw_packet, dropped); } static int batadv_iv_ogm_receive(struct sk_buff *skb, struct batadv_hard_iface *if_incoming) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_ogm_packet *ogm_packet; u8 *packet_pos; int ogm_offset; bool res; int ret = NET_RX_DROP; res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN); if (!res) goto free_skb; /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface * that does not have B.A.T.M.A.N. IV enabled ? */ if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable) goto free_skb; batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); ogm_offset = 0; ogm_packet = (struct batadv_ogm_packet *)skb->data; /* unpack the aggregated packets and process them one by one */ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), ogm_packet)) { batadv_iv_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM_HLEN; ogm_offset += ntohs(ogm_packet->tvlv_len); packet_pos = skb->data + ogm_offset; ogm_packet = (struct batadv_ogm_packet *)packet_pos; } ret = NET_RX_SUCCESS; free_skb: if (ret == NET_RX_SUCCESS) consume_skb(skb); else kfree_skb(skb); return ret; } /** * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a * given outgoing interface. * @neigh_node: Neighbour of interest * @if_outgoing: Outgoing interface of interest * @tq_avg: Pointer of where to store the TQ average * * Return: False if no average TQ available, otherwise true. 
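* An average is unavailable when no neigh_ifinfo entry exists (yet) for the given @if_outgoing.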
*/ static bool batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_outgoing, u8 *tq_avg) { struct batadv_neigh_ifinfo *n_ifinfo; n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); if (!n_ifinfo) return false; *tq_avg = n_ifinfo->bat_iv.tq_avg; batadv_neigh_ifinfo_put(n_ifinfo); return true; } /** * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a * message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @neigh_node: Single hops neighbour * @best: Is the best originator * * Return: Error code, or 0 on success */ static int batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node, bool best) { void *hdr; u8 tq_avg; unsigned int last_seen_msecs; last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen); if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg)) return 0; if (if_outgoing != BATADV_IF_DEFAULT && if_outgoing != neigh_node->if_incoming) return 0; hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS); if (!hdr) return -ENOBUFS; if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) || nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, neigh_node->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, neigh_node->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, neigh_node->if_incoming->net_dev->ifindex) || nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) || nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs)) goto nla_put_failure; if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /** * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @sub_s: Number of sub entries to skip * * This function assumes the caller holds rcu_read_lock(). 
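* The RCU read lock protects the walk over the originator's neigh_list.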
* * Return: Error code, or 0 on success */ static int batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, struct batadv_orig_node *orig_node, int *sub_s) { struct batadv_neigh_node *neigh_node_best; struct batadv_neigh_node *neigh_node; int sub = 0; bool best; u8 tq_avg_best; neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing); if (!neigh_node_best) goto out; if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing, &tq_avg_best)) goto out; if (tq_avg_best == 0) goto out; hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) { if (sub++ < *sub_s) continue; best = (neigh_node == neigh_node_best); if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq, bat_priv, if_outgoing, orig_node, neigh_node, best)) { batadv_neigh_node_put(neigh_node_best); *sub_s = sub - 1; return -EMSGSIZE; } } out: batadv_neigh_node_put(neigh_node_best); *sub_s = 0; return 0; } /** * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a * message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @head: Bucket to be dumped * @idx_s: Number of entries to be skipped * @sub: Number of sub entries to be skipped * * Return: Error code, or 0 on success */ static int batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, struct hlist_head *head, int *idx_s, int *sub) { struct batadv_orig_node *orig_node; int idx = 0; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { if (idx++ < *idx_s) continue; if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv, if_outgoing, orig_node, sub)) { rcu_read_unlock(); *idx_s = idx - 1; return -EMSGSIZE; } } rcu_read_unlock(); *idx_s = 0; *sub = 0; return 0; } /** * batadv_iv_ogm_orig_dump() - Dump the originators into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface */ static void batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; int bucket = cb->args[0]; int idx = cb->args[1]; int sub = cb->args[2]; int portid = NETLINK_CB(cb->skb).portid; while (bucket < hash->size) { head = &hash->table[bucket]; if (batadv_iv_ogm_orig_dump_bucket(msg, portid, cb->nlh->nlmsg_seq, bat_priv, if_outgoing, head, &idx, &sub)) break; bucket++; } cb->args[0] = bucket; cb->args[1] = idx; cb->args[2] = sub; } /** * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor * @diff: pointer to integer receiving the calculated difference * * The content of *@diff is only valid when this function returns true. 
* It is less, equal to or greater than 0 if the metric via neigh1 is lower, * the same as or higher than the metric via neigh2 * * Return: true when the difference could be calculated, false otherwise */ static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2, int *diff) { struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo; u8 tq1, tq2; bool ret = true; neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1); neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2); if (!neigh1_ifinfo || !neigh2_ifinfo) { ret = false; goto out; } tq1 = neigh1_ifinfo->bat_iv.tq_avg; tq2 = neigh2_ifinfo->bat_iv.tq_avg; *diff = (int)tq1 - (int)tq2; out: batadv_neigh_ifinfo_put(neigh1_ifinfo); batadv_neigh_ifinfo_put(neigh2_ifinfo); return ret; } /** * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @hardif_neigh: Neighbour to be dumped * * Return: Error code, or 0 on success */ static int batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_hardif_neigh_node *hardif_neigh) { void *hdr; unsigned int last_seen_msecs; last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen); hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_NEIGHBORS); if (!hdr) return -ENOBUFS; if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, hardif_neigh->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, hardif_neigh->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, hardif_neigh->if_incoming->net_dev->ifindex) || nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /** * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface * into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the mesh interface information * @hard_iface: Hard interface to dump the neighbours for * @idx_s: Number of entries to skip * * This function assumes the caller holds rcu_read_lock(). 
* * Return: Error code, or 0 on success */ static int batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *hard_iface, int *idx_s) { struct batadv_hardif_neigh_node *hardif_neigh; int idx = 0; hlist_for_each_entry_rcu(hardif_neigh, &hard_iface->neigh_list, list) { if (idx++ < *idx_s) continue; if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq, hardif_neigh)) { *idx_s = idx - 1; return -EMSGSIZE; } } *idx_s = 0; return 0; } /** * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the mesh interface information * @single_hardif: Limit dump to this hard interface */ static void batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_hard_iface *single_hardif) { struct batadv_hard_iface *hard_iface; int i_hardif = 0; int i_hardif_s = cb->args[0]; int idx = cb->args[1]; int portid = NETLINK_CB(cb->skb).portid; rcu_read_lock(); if (single_hardif) { if (i_hardif_s == 0) { if (batadv_iv_ogm_neigh_dump_hardif(msg, portid, cb->nlh->nlmsg_seq, bat_priv, single_hardif, &idx) == 0) i_hardif++; } } else { list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (i_hardif++ < i_hardif_s) continue; if (batadv_iv_ogm_neigh_dump_hardif(msg, portid, cb->nlh->nlmsg_seq, bat_priv, hard_iface, &idx)) { i_hardif--; break; } } } rcu_read_unlock(); cb->args[0] = i_hardif; cb->args[1] = idx; } /** * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor * * Return: a value less, equal to or greater than 0 if the metric via neigh1 is * lower, the same as or higher than the metric via neigh2 */ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2) { bool ret; int diff; ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2, if_outgoing2, &diff); if (!ret) return 0; return diff; } /** * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better * than neigh2 from the metric prospective * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor * * Return: true if the metric via neigh1 is equally good or better than * the metric via neigh2, false otherwise. 
*/ static bool batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2) { bool ret; int diff; ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2, if_outgoing2, &diff); if (!ret) return false; ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD; return ret; } static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface) { /* begin scheduling originator messages on that interface */ batadv_iv_ogm_schedule(hard_iface); } /** * batadv_iv_init_sel_class() - initialize GW selection class * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv) { /* set default TQ difference threshold to 20 */ atomic_set(&bat_priv->gw.sel_class, 20); } static struct batadv_gw_node * batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) { struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo; struct batadv_gw_node *gw_node, *curr_gw = NULL; u64 max_gw_factor = 0; u64 tmp_gw_factor = 0; u8 max_tq = 0; u8 tq_avg; struct batadv_orig_node *orig_node; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) { orig_node = gw_node->orig_node; router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router) continue; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto next; if (!kref_get_unless_zero(&gw_node->refcount)) goto next; tq_avg = router_ifinfo->bat_iv.tq_avg; switch (atomic_read(&bat_priv->gw.sel_class)) { case 1: /* fast connection */ tmp_gw_factor = tq_avg * tq_avg; tmp_gw_factor *= gw_node->bandwidth_down; tmp_gw_factor *= 100 * 100; tmp_gw_factor >>= 18; if (tmp_gw_factor > max_gw_factor || (tmp_gw_factor == max_gw_factor && tq_avg > max_tq)) { batadv_gw_node_put(curr_gw); curr_gw = gw_node; kref_get(&curr_gw->refcount); } break; default: /* 2: stable connection (use best statistic) * 3: fast-switch (use best statistic but change as * soon as a better gateway appears) * XX: late-switch (use best statistic but change as * soon as a better gateway appears which has * $routing_class more tq points) */ if (tq_avg > max_tq) { batadv_gw_node_put(curr_gw); curr_gw = gw_node; kref_get(&curr_gw->refcount); } break; } if (tq_avg > max_tq) max_tq = tq_avg; if (tmp_gw_factor > max_gw_factor) max_gw_factor = tmp_gw_factor; batadv_gw_node_put(gw_node); next: batadv_neigh_node_put(router); batadv_neigh_ifinfo_put(router_ifinfo); } rcu_read_unlock(); return curr_gw; } static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv, struct batadv_orig_node *curr_gw_orig, struct batadv_orig_node *orig_node) { struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL; struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL; struct batadv_neigh_node *router_gw = NULL; struct batadv_neigh_node *router_orig = NULL; u8 gw_tq_avg, orig_tq_avg; bool ret = false; /* dynamic re-election is performed only on fast or late switch */ if (atomic_read(&bat_priv->gw.sel_class) <= 2) return false; router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); if (!router_gw) { ret = true; goto out; } router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw, BATADV_IF_DEFAULT); if (!router_gw_ifinfo) { ret = true; goto out; } router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router_orig) goto out; router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig, BATADV_IF_DEFAULT); if (!router_orig_ifinfo) goto out; 
gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg; orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg; /* the TQ value has to be better */ if (orig_tq_avg < gw_tq_avg) goto out; /* if the routing class is greater than 3 the value tells us how much * greater the TQ value of the new gateway must be */ if ((atomic_read(&bat_priv->gw.sel_class) > 3) && (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", gw_tq_avg, orig_tq_avg); ret = true; out: batadv_neigh_ifinfo_put(router_gw_ifinfo); batadv_neigh_ifinfo_put(router_orig_ifinfo); batadv_neigh_node_put(router_gw); batadv_neigh_node_put(router_orig); return ret; } /** * batadv_iv_gw_dump_entry() - Dump a gateway into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options * @bat_priv: The bat priv with all the mesh interface information * @gw_node: Gateway to be dumped * * Return: Error code, or 0 on success */ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_gw_node *gw_node) { struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; struct batadv_gw_node *curr_gw = NULL; int ret = 0; void *hdr; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); if (!router) goto out; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS); if (!hdr) { ret = -ENOBUFS; goto out; } genl_dump_check_consistent(cb, hdr); ret = -EMSGSIZE; if (curr_gw == gw_node) if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) { genlmsg_cancel(msg, hdr); goto out; } if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, gw_node->orig_node->orig) || nla_put_u8(msg, BATADV_ATTR_TQ, router_ifinfo->bat_iv.tq_avg) || nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, router->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, router->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, router->if_incoming->net_dev->ifindex) || nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN, gw_node->bandwidth_down) || nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, gw_node->bandwidth_up)) { genlmsg_cancel(msg, hdr); goto out; } genlmsg_end(msg, hdr); ret = 0; out: batadv_gw_node_put(curr_gw); batadv_neigh_ifinfo_put(router_ifinfo); batadv_neigh_node_put(router); return ret; } /** * batadv_iv_gw_dump() - Dump gateways into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the mesh interface information */ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv) { int portid = NETLINK_CB(cb->skb).portid; struct batadv_gw_node *gw_node; int idx_skip = cb->args[0]; int idx = 0; spin_lock_bh(&bat_priv->gw.list_lock); cb->seq = bat_priv->gw.generation << 1 | 1; hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) { if (idx++ < idx_skip) continue; if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv, gw_node)) { idx_skip = idx - 1; goto unlock; } } idx_skip = idx; unlock: spin_unlock_bh(&bat_priv->gw.list_lock); cb->args[0] = idx_skip; } static struct batadv_algo_ops batadv_batman_iv __read_mostly = { .name = 
"BATMAN_IV", .iface = { .enable = batadv_iv_ogm_iface_enable, .enabled = batadv_iv_iface_enabled, .disable = batadv_iv_ogm_iface_disable, .update_mac = batadv_iv_ogm_iface_update_mac, .primary_set = batadv_iv_ogm_primary_iface_set, }, .neigh = { .cmp = batadv_iv_ogm_neigh_cmp, .is_similar_or_better = batadv_iv_ogm_neigh_is_sob, .dump = batadv_iv_ogm_neigh_dump, }, .orig = { .dump = batadv_iv_ogm_orig_dump, }, .gw = { .init_sel_class = batadv_iv_init_sel_class, .sel_class_max = BATADV_TQ_MAX_VALUE, .get_best_gw_node = batadv_iv_gw_get_best_gw_node, .is_eligible = batadv_iv_gw_is_eligible, .dump = batadv_iv_gw_dump, }, }; /** * batadv_iv_init() - B.A.T.M.A.N. IV initialization function * * Return: 0 on success or negative error number in case of failure */ int __init batadv_iv_init(void) { int ret; /* batman originator packet */ ret = batadv_recv_handler_register(BATADV_IV_OGM, batadv_iv_ogm_receive); if (ret < 0) goto out; ret = batadv_algo_register(&batadv_batman_iv); if (ret < 0) goto handler_unregister; goto out; handler_unregister: batadv_recv_handler_unregister(BATADV_IV_OGM); out: return ret; } |
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/act_gact.c Generic actions * * copyright Jamal Hadi Salim (2002-4) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/module.h> #include <linux/init.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <linux/tc_act/tc_gact.h> #include <net/tc_act/tc_gact.h> #include <net/tc_wrapper.h> static struct tc_action_ops act_gact_ops; #ifdef CONFIG_GACT_PROB static int gact_net_rand(struct tcf_gact *gact) { smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */ if (get_random_u32_below(gact->tcfg_pval)) return gact->tcf_action; return gact->tcfg_paction; } static int gact_determ(struct tcf_gact *gact) { u32 pack = atomic_inc_return(&gact->packets); smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */ if (pack % gact->tcfg_pval) return gact->tcf_action; return gact->tcfg_paction; } typedef int (*g_rand)(struct tcf_gact *gact); static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ }; #endif /* CONFIG_GACT_PROB */ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = { [TCA_GACT_PARMS] = { .len = sizeof(struct tc_gact) }, [TCA_GACT_PROB] = { .len = sizeof(struct tc_gact_p) }, }; static int tcf_gact_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_GACT_MAX + 1]; struct tcf_chain *goto_ch = NULL; struct tc_gact *parm; struct tcf_gact *gact; int ret = 0; u32 index; int err; #ifdef CONFIG_GACT_PROB struct tc_gact_p *p_parm = NULL; #endif if (nla == NULL) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_GACT_MAX, nla, gact_policy, NULL); if (err < 0) return err; if (tb[TCA_GACT_PARMS] == NULL) return -EINVAL; parm = nla_data(tb[TCA_GACT_PARMS]); index = parm->index; #ifndef CONFIG_GACT_PROB if (tb[TCA_GACT_PROB] != NULL) return -EOPNOTSUPP; #else if
(tb[TCA_GACT_PROB]) { p_parm = nla_data(tb[TCA_GACT_PROB]); if (p_parm->ptype >= MAX_RAND) return -EINVAL; if (TC_ACT_EXT_CMP(p_parm->paction, TC_ACT_GOTO_CHAIN)) { NL_SET_ERR_MSG(extack, "goto chain not allowed on fallback"); return -EINVAL; } } #endif err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { ret = tcf_idr_create_from_flags(tn, index, est, a, &act_gact_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (err > 0) { if (bind)/* dont override defaults */ return ACT_P_BOUND; if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } } else { return err; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; gact = to_gact(*a); spin_lock_bh(&gact->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); #ifdef CONFIG_GACT_PROB if (p_parm) { gact->tcfg_paction = p_parm->paction; gact->tcfg_pval = max_t(u16, 1, p_parm->pval); /* Make sure tcfg_pval is written before tcfg_ptype * coupled with smp_rmb() in gact_net_rand() & gact_determ() */ smp_wmb(); gact->tcfg_ptype = p_parm->ptype; } #endif spin_unlock_bh(&gact->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); return ret; release_idr: tcf_idr_release(*a, bind); return err; } TC_INDIRECT_SCOPE int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); #ifdef CONFIG_GACT_PROB { u32 ptype = READ_ONCE(gact->tcfg_ptype); if (ptype) action = gact_rand[ptype](gact); } #endif tcf_action_update_bstats(&gact->common, skb); if (action == TC_ACT_SHOT) tcf_action_inc_drop_qstats(&gact->common); tcf_lastuse_update(&gact->tcf_tm); return action; } static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); struct tcf_t *tm = &gact->tcf_tm; tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT ? 
packets : drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_gact *gact = to_gact(a); struct tc_gact opt = { .index = gact->tcf_index, .refcnt = refcount_read(&gact->tcf_refcnt) - ref, .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind, }; struct tcf_t t; spin_lock_bh(&gact->tcf_lock); opt.action = gact->tcf_action; if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt)) goto nla_put_failure; #ifdef CONFIG_GACT_PROB if (gact->tcfg_ptype) { struct tc_gact_p p_opt = { .paction = gact->tcfg_paction, .pval = gact->tcfg_pval, .ptype = gact->tcfg_ptype, }; if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt)) goto nla_put_failure; } #endif tcf_tm_dump(&t, &gact->tcf_tm); if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD)) goto nla_put_failure; spin_unlock_bh(&gact->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&gact->tcf_lock); nlmsg_trim(skb, b); return -1; } static size_t tcf_gact_get_fill_size(const struct tc_action *act) { size_t sz = nla_total_size(sizeof(struct tc_gact)); /* TCA_GACT_PARMS */ #ifdef CONFIG_GACT_PROB if (to_gact(act)->tcfg_ptype) /* TCA_GACT_PROB */ sz += nla_total_size(sizeof(struct tc_gact_p)); #endif return sz; } static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) { if (bind) { struct flow_action_entry *entry = entry_data; if (is_tcf_gact_ok(act)) { entry->id = FLOW_ACTION_ACCEPT; } else if (is_tcf_gact_shot(act)) { entry->id = FLOW_ACTION_DROP; } else if (is_tcf_gact_trap(act)) { entry->id = FLOW_ACTION_TRAP; } else if (is_tcf_gact_goto_chain(act)) { entry->id = FLOW_ACTION_GOTO; entry->chain_index = tcf_gact_goto_chain_index(act); } else if (is_tcf_gact_continue(act)) { NL_SET_ERR_MSG_MOD(extack, "Offload of \"continue\" action is not supported"); return -EOPNOTSUPP; } else if (is_tcf_gact_reclassify(act)) { NL_SET_ERR_MSG_MOD(extack, "Offload of \"reclassify\" action is not supported"); return -EOPNOTSUPP; } else if (is_tcf_gact_pipe(act)) { NL_SET_ERR_MSG_MOD(extack, "Offload of \"pipe\" action is not supported"); return -EOPNOTSUPP; } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported generic action offload"); return -EOPNOTSUPP; } *index_inc = 1; } else { struct flow_offload_action *fl_action = entry_data; if (is_tcf_gact_ok(act)) fl_action->id = FLOW_ACTION_ACCEPT; else if (is_tcf_gact_shot(act)) fl_action->id = FLOW_ACTION_DROP; else if (is_tcf_gact_trap(act)) fl_action->id = FLOW_ACTION_TRAP; else if (is_tcf_gact_goto_chain(act)) fl_action->id = FLOW_ACTION_GOTO; else return -EOPNOTSUPP; } return 0; } static struct tc_action_ops act_gact_ops = { .kind = "gact", .id = TCA_ID_GACT, .owner = THIS_MODULE, .act = tcf_gact_act, .stats_update = tcf_gact_stats_update, .dump = tcf_gact_dump, .init = tcf_gact_init, .get_fill_size = tcf_gact_get_fill_size, .offload_act_setup = tcf_gact_offload_act_setup, .size = sizeof(struct tcf_gact), }; MODULE_ALIAS_NET_ACT("gact"); static __net_init int gact_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_gact_ops.net_id); return tc_action_net_init(net, tn, &act_gact_ops); } static void __net_exit gact_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_gact_ops.net_id); } static struct pernet_operations gact_net_ops = { .init = gact_init_net, .exit_batch = gact_exit_net, .id = &act_gact_ops.net_id, .size = sizeof(struct 
tc_action_net), }; MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); MODULE_DESCRIPTION("Generic Classifier actions"); MODULE_LICENSE("GPL"); static int __init gact_init_module(void) { #ifdef CONFIG_GACT_PROB pr_info("GACT probability on\n"); #else pr_info("GACT probability NOT on\n"); #endif return tcf_register_action(&act_gact_ops, &gact_net_ops); } static void __exit gact_cleanup_module(void) { tcf_unregister_action(&act_gact_ops, &gact_net_ops); } module_init(gact_init_module); module_exit(gact_cleanup_module);
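/*
 * Illustration (not part of the original sources): a minimal, standalone
 * userspace sketch of the two CONFIG_GACT_PROB policies implemented above
 * by gact_net_rand() and gact_determ().  With pval == 4 the random policy
 * applies the fallback action to roughly one packet in four, while the
 * deterministic policy applies it to exactly every fourth packet.  All
 * names and values below are illustrative stand-ins, not kernel symbols.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ACT_PRIMARY	0	/* stand-in for gact->tcf_action */
#define ACT_FALLBACK	1	/* stand-in for gact->tcfg_paction */

/* kernel: if (get_random_u32_below(pval)) return tcf_action; */
static int netrand_pick(unsigned int pval)
{
	return (rand() % pval) ? ACT_PRIMARY : ACT_FALLBACK;
}

/* kernel: if (atomic_inc_return(&gact->packets) % pval) return tcf_action; */
static int determ_pick(unsigned int *packets, unsigned int pval)
{
	return (++*packets % pval) ? ACT_PRIMARY : ACT_FALLBACK;
}

int main(void)
{
	unsigned int packets = 0, rand_fallbacks = 0, det_fallbacks = 0, i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 100000; i++) {
		rand_fallbacks += netrand_pick(4) == ACT_FALLBACK;
		det_fallbacks += determ_pick(&packets, 4) == ACT_FALLBACK;
	}
	/* expect roughly 25000 random fallbacks and exactly 25000 deterministic ones */
	printf("random: %u, deterministic: %u\n", rand_fallbacks, det_fallbacks);
	return 0;
}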
/* SPDX-License-Identifier: GPL-2.0 */ /* * Statically sized hash table implementation * (C) 2012 Sasha Levin <levinsasha928@gmail.com> */ #ifndef _LINUX_HASHTABLE_H #define _LINUX_HASHTABLE_H #include <linux/list.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/hash.h> #include <linux/rculist.h> #define DEFINE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } #define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] __read_mostly = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } #define DECLARE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] #define HASH_SIZE(name) (ARRAY_SIZE(name)) #define HASH_BITS(name) ilog2(HASH_SIZE(name)) /* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ #define hash_min(val, bits) \ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) static inline void __hash_init(struct hlist_head *ht, unsigned int sz) { unsigned int i; for (i = 0; i < sz; i++) INIT_HLIST_HEAD(&ht[i]); } /** * hash_init - initialize a hash table * @hashtable: hashtable to be initialized * * Calculates the size of the hashtable from the given parameter, otherwise * same as hash_init_size. * * This has to be a macro since HASH_BITS() will not work on pointers since * it calculates the size during preprocessing. */ #define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) /** * hash_add - add an object to a hashtable * @hashtable: hashtable to add to * @node: the &struct hlist_node of the object to be added * @key: the key of the object to be added */ #define hash_add(hashtable, node, key) \ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) /** * hash_add_rcu - add an object to a rcu enabled hashtable * @hashtable: hashtable to add to * @node: the &struct hlist_node of the object to be added * @key: the key of the object to be added */ #define hash_add_rcu(hashtable, node, key) \ hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) /** * hash_hashed - check whether an object is in any hashtable * @node: the &struct hlist_node of the object to be checked */ static inline bool hash_hashed(struct hlist_node *node) { return !hlist_unhashed(node); } static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) { unsigned int i; for (i = 0; i < sz; i++) if (!hlist_empty(&ht[i])) return false; return true; } /** * hash_empty - check whether a hashtable is empty * @hashtable: hashtable to check * * This has to be a macro since HASH_BITS() will not work on pointers since * it calculates the size during preprocessing.
*/ #define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) /** * hash_del - remove an object from a hashtable * @node: &struct hlist_node of the object to remove */ static inline void hash_del(struct hlist_node *node) { hlist_del_init(node); } /** * hash_del_rcu - remove an object from a rcu enabled hashtable * @node: &struct hlist_node of the object to remove */ static inline void hash_del_rcu(struct hlist_node *node) { hlist_del_init_rcu(node); } /** * hash_for_each - iterate over a hashtable * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each(name, bkt, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry(obj, &name[bkt], member) /** * hash_for_each_rcu - iterate over a rcu enabled hashtable * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each_rcu(name, bkt, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry_rcu(obj, &name[bkt], member) /** * hash_for_each_safe - iterate over a hashtable safe against removal of * hash entry * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor * @tmp: a &struct hlist_node used for temporary storage * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ #define hash_for_each_safe(name, bkt, tmp, obj, member) \ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ (bkt)++)\ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) /** * hash_for_each_possible - iterate over all possible objects hashing to the * same bucket * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ #define hash_for_each_possible(name, obj, member, key) \ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) /** * hash_for_each_possible_rcu - iterate over all possible objects hashing to the * same bucket in an rcu enabled hashtable * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ #define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ member, ## cond) /** * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing * to the same bucket in an rcu enabled hashtable * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over * * This is the same as hash_for_each_possible_rcu() except that it does * not do any RCU debugging or tracing.
*/ #define hash_for_each_possible_rcu_notrace(name, obj, member, key) \ hlist_for_each_entry_rcu_notrace(obj, \ &name[hash_min(key, HASH_BITS(name))], member) /** * hash_for_each_possible_safe - iterate over all possible objects hashing to the * same bucket safe against removals * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry * @tmp: a &struct hlist_node used for temporary storage * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ #define hash_for_each_possible_safe(name, obj, tmp, member, key) \ hlist_for_each_entry_safe(obj, tmp,\ &name[hash_min(key, HASH_BITS(name))], member) #endif
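/*
 * Illustration (not part of the original header): a short usage sketch of
 * the API above, written as it might appear in a kernel module that
 * includes <linux/hashtable.h>; it is not standalone userspace code.
 * struct example_obj, example_table and the key value are invented here
 * purely for illustration.
 */
#include <linux/hashtable.h>
#include <linux/slab.h>

struct example_obj {
	int key;
	struct hlist_node node;	/* linkage used by hash_add()/hash_del() */
};

static DEFINE_HASHTABLE(example_table, 4);	/* 2^4 = 16 buckets */

static void example_usage(void)
{
	struct example_obj *obj, *found;
	int bkt;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return;
	obj->key = 42;
	hash_add(example_table, &obj->node, obj->key);

	/* lookup walks only the bucket that the key hashes to */
	hash_for_each_possible(example_table, found, node, 42)
		if (found->key == 42)
			break;

	/* full traversal visits every bucket */
	hash_for_each(example_table, bkt, obj, node)
		pr_info("bucket %d holds key %d\n", bkt, obj->key);

	if (found) {
		hash_del(&found->node);
		kfree(found);
	}
}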
#ifndef __NET_SCHED_CODEL_H #define __NET_SCHED_CODEL_H /* * Codel - The Controlled-Delay Active Queue Management algorithm * * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net> * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/types.h> #include <linux/ktime.h> #include <linux/skbuff.h> /* Controlling Queue Delay (CoDel) algorithm * ========================================= * Source : Kathleen Nichols and Van Jacobson * http://queue.acm.org/detail.cfm?id=2209336 * * Implemented on linux by Dave Taht and Eric Dumazet */ /* CoDel uses a 1024 nsec clock, encoded in u32 * This gives a range of 2199 seconds, because of signed compares */ typedef u32 codel_time_t; typedef s32 codel_tdiff_t; #define CODEL_SHIFT 10 #define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT) static inline codel_time_t codel_get_time(void) { u64 ns = ktime_get_ns(); return ns >> CODEL_SHIFT; } /* Dealing with timer wrapping, according to RFC 1982, as desc in wikipedia: * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution * codel_time_after(a,b) returns true if the time a is after time b.
*/ #define codel_time_after(a, b) \ (typecheck(codel_time_t, a) && \ typecheck(codel_time_t, b) && \ ((s32)((a) - (b)) > 0)) #define codel_time_before(a, b) codel_time_after(b, a) #define codel_time_after_eq(a, b) \ (typecheck(codel_time_t, a) && \ typecheck(codel_time_t, b) && \ ((s32)((a) - (b)) >= 0)) #define codel_time_before_eq(a, b) codel_time_after_eq(b, a) static inline u32 codel_time_to_us(codel_time_t val) { u64 valns = ((u64)val << CODEL_SHIFT); do_div(valns, NSEC_PER_USEC); return (u32)valns; } /** * struct codel_params - contains codel parameters * @target: target queue size (in time units) * @ce_threshold: threshold for marking packets with ECN CE * @interval: width of moving time window * @mtu: device mtu, or minimal queue backlog in bytes. * @ecn: is Explicit Congestion Notification enabled * @ce_threshold_selector: apply ce_threshold to packets matching this value * in the diffserv/ECN byte of the IP header * @ce_threshold_mask: mask to apply to ce_threshold_selector comparison */ struct codel_params { codel_time_t target; codel_time_t ce_threshold; codel_time_t interval; u32 mtu; bool ecn; u8 ce_threshold_selector; u8 ce_threshold_mask; }; /** * struct codel_vars - contains codel variables * @count: how many drops we've done since the last time we * entered dropping state * @lastcount: count at entry to dropping state * @dropping: set to true if in dropping state * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1 * @first_above_time: when we went (or will go) continuously above target * for interval * @drop_next: time to drop next packet, or when we dropped last * @ldelay: sojourn time of last dequeued packet */ struct codel_vars { u32 count; u32 lastcount; bool dropping; u16 rec_inv_sqrt; codel_time_t first_above_time; codel_time_t drop_next; codel_time_t ldelay; }; #define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */ /* needed shift to get a Q0.32 number from rec_inv_sqrt */ #define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS) /** * struct codel_stats - contains codel shared variables and stats * @maxpacket: largest packet we've seen so far * @drop_count: temp count of dropped packets in dequeue() * @drop_len: bytes of dropped packets in dequeue() * @ecn_mark: number of packets we ECN marked instead of dropping * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold */ struct codel_stats { u32 maxpacket; u32 drop_count; u32 drop_len; u32 ecn_mark; u32 ce_mark; }; #define CODEL_DISABLED_THRESHOLD INT_MAX typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb); typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb); typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx); typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, void *ctx); #endif
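/*
 * Illustration (not part of the original header): a standalone userspace
 * sketch of the wrap-safe comparison defined by codel_time_after() above.
 * codel_time_t counts 1024 ns ticks in a u32, so the counter wraps after
 * about 4398 s and only differences within roughly +/-2199 s are
 * meaningful, which is why the macros compare via a signed 32-bit
 * difference (serial number arithmetic, RFC 1982).  The sample values
 * below are made up.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t codel_time_t;

/* same test as codel_time_after(), minus the typecheck() guards */
static int time_after(codel_time_t a, codel_time_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	codel_time_t before_wrap = 0xffffff00u;	/* shortly before the u32 wraps */
	codel_time_t after_wrap = 0x00000100u;	/* shortly after it wrapped */

	/* a plain unsigned compare gets the ordering wrong across the wrap */
	printf("naive a > b : %d\n", after_wrap > before_wrap);
	/* the signed-difference form still reports "after" correctly */
	printf("codel after : %d\n", time_after(after_wrap, before_wrap));
	return 0;
}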
// SPDX-License-Identifier: GPL-2.0 /* Bareudp: UDP tunnel encapsulation for different Payload types like * MPLS, NSH, IP, etc. * Copyright (c) 2019 Nokia, Inc.
* Authors: Martin Varghese, <martin.varghese@nokia.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/dst_metadata.h> #include <net/gro_cells.h> #include <net/rtnetlink.h> #include <net/protocol.h> #include <net/ip6_tunnel.h> #include <net/ip_tunnels.h> #include <net/udp_tunnel.h> #include <net/bareudp.h> #define BAREUDP_BASE_HLEN sizeof(struct udphdr) #define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \ sizeof(struct udphdr)) #define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \ sizeof(struct udphdr)) static bool log_ecn_error = true; module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); /* per-network namespace private data for this module */ static unsigned int bareudp_net_id; struct bareudp_net { struct list_head bareudp_list; }; struct bareudp_conf { __be16 ethertype; __be16 port; u16 sport_min; bool multi_proto_mode; }; /* Pseudo network device */ struct bareudp_dev { struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for bareudp tunnel */ __be16 ethertype; __be16 port; u16 sport_min; bool multi_proto_mode; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; }; static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; IP_TUNNEL_DECLARE_FLAGS(key) = { }; struct bareudp_dev *bareudp; unsigned short family; unsigned int len; __be16 proto; void *oiph; int err; int nh; bareudp = rcu_dereference_sk_user_data(sk); if (!bareudp) goto drop; if (skb->protocol == htons(ETH_P_IP)) family = AF_INET; else family = AF_INET6; if (bareudp->ethertype == htons(ETH_P_IP)) { __u8 ipversion; if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion, sizeof(ipversion))) { dev_dstats_rx_dropped(bareudp->dev); goto drop; } ipversion >>= 4; if (ipversion == 4) { proto = htons(ETH_P_IP); } else if (ipversion == 6 && bareudp->multi_proto_mode) { proto = htons(ETH_P_IPV6); } else { dev_dstats_rx_dropped(bareudp->dev); goto drop; } } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) { struct iphdr *tunnel_hdr; tunnel_hdr = (struct iphdr *)skb_network_header(skb); if (tunnel_hdr->version == 4) { if (!ipv4_is_multicast(tunnel_hdr->daddr)) { proto = bareudp->ethertype; } else if (bareudp->multi_proto_mode && ipv4_is_multicast(tunnel_hdr->daddr)) { proto = htons(ETH_P_MPLS_MC); } else { dev_dstats_rx_dropped(bareudp->dev); goto drop; } } else { int addr_type; struct ipv6hdr *tunnel_hdr_v6; tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb); addr_type = ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr); if (!(addr_type & IPV6_ADDR_MULTICAST)) { proto = bareudp->ethertype; } else if (bareudp->multi_proto_mode && (addr_type & IPV6_ADDR_MULTICAST)) { proto = htons(ETH_P_MPLS_MC); } else { dev_dstats_rx_dropped(bareudp->dev); goto drop; } } } else { proto = bareudp->ethertype; } if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN, proto, !net_eq(bareudp->net, dev_net(bareudp->dev)))) { dev_dstats_rx_dropped(bareudp->dev); goto drop; } __set_bit(IP_TUNNEL_KEY_BIT, key); tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0); if (!tun_dst) { dev_dstats_rx_dropped(bareudp->dev); goto drop; } skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; skb_reset_mac_header(skb); /* Save offset of outer header relative to skb->head, * because we are going to reset the network header to the inner header * 
and might change skb->head. */ nh = skb_network_header(skb) - skb->head; skb_reset_network_header(skb); if (!pskb_inet_may_pull(skb)) { DEV_STATS_INC(bareudp->dev, rx_length_errors); DEV_STATS_INC(bareudp->dev, rx_errors); goto drop; } /* Get the outer header. */ oiph = skb->head + nh; if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else err = IP6_ECN_decapsulate(oiph, skb); if (unlikely(err)) { if (log_ecn_error) { if (!ipv6_mod_enabled() || family == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, ((struct iphdr *)oiph)->tos); else net_info_ratelimited("non-ECT from %pI6\n", &((struct ipv6hdr *)oiph)->saddr); } if (err > 1) { DEV_STATS_INC(bareudp->dev, rx_frame_errors); DEV_STATS_INC(bareudp->dev, rx_errors); goto drop; } } len = skb->len; err = gro_cells_receive(&bareudp->gro_cells, skb); if (likely(err == NET_RX_SUCCESS)) dev_dstats_rx_add(bareudp->dev, len); return 0; drop: /* Consume bad packet */ kfree_skb(skb); return 0; } static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb) { return 0; } static int bareudp_init(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); int err; err = gro_cells_init(&bareudp->gro_cells, dev); if (err) return err; return 0; } static void bareudp_uninit(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); gro_cells_destroy(&bareudp->gro_cells); } static struct socket *bareudp_create_sock(struct net *net, __be16 port) { struct udp_port_cfg udp_conf; struct socket *sock; int err; memset(&udp_conf, 0, sizeof(udp_conf)); if (ipv6_mod_enabled()) udp_conf.family = AF_INET6; else udp_conf.family = AF_INET; udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); if (err < 0) return ERR_PTR(err); udp_allow_gso(sock->sk); return sock; } /* Create new listen socket if needed */ static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port) { struct udp_tunnel_sock_cfg tunnel_cfg; struct socket *sock; sock = bareudp_create_sock(bareudp->net, port); if (IS_ERR(sock)) return PTR_ERR(sock); /* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); tunnel_cfg.sk_user_data = bareudp; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = bareudp_udp_encap_recv; tunnel_cfg.encap_err_lookup = bareudp_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg); rcu_assign_pointer(bareudp->sock, sock); return 0; } static int bareudp_open(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); int ret = 0; ret = bareudp_socket_create(bareudp, bareudp->port); return ret; } static void bareudp_sock_release(struct bareudp_dev *bareudp) { struct socket *sock; sock = bareudp->sock; rcu_assign_pointer(bareudp->sock, NULL); synchronize_net(); udp_tunnel_sock_release(sock); } static int bareudp_stop(struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); bareudp_sock_release(bareudp); return 0; } static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) { bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct socket *sock = rcu_dereference(bareudp->sock); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; __be16 sport, df; int min_headroom; __u8 tos, ttl; __be32 
saddr; int err; if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) return -EINVAL; if (!sock) return -ESHUTDOWN; sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, true); rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key, sport, bareudp->port, key->tos, use_cache ? (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, BAREUDP_IPV4_HLEN + info->options_len, false); tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? htons(IP_DF) : 0; skb_scrub_packet(skb, xnet); err = -ENOSPC; if (!skb_pull(skb, skb_network_offset(skb))) goto free_dst; min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr); err = skb_cow_head(skb, min_headroom); if (unlikely(err)) goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); if (err) goto free_dst; skb_set_inner_protocol(skb, bareudp->ethertype); udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, bareudp->port, !net_eq(bareudp->net, dev_net(bareudp->dev)), !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags)); return 0; free_dst: dst_release(&rt->dst); return err; } static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) { bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct socket *sock = rcu_dereference(bareudp->sock); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; struct in6_addr saddr, daddr; int min_headroom; __u8 prio, ttl; __be16 sport; int err; if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) return -EINVAL; if (!sock) return -ESHUTDOWN; sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, true); dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr, key, sport, bareudp->port, key->tos, use_cache ? 
(struct dst_cache *) &info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len, false); prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; skb_scrub_packet(skb, xnet); err = -ENOSPC; if (!skb_pull(skb, skb_network_offset(skb))) goto free_dst; min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len + BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr); err = skb_cow_head(skb, min_headroom); if (unlikely(err)) goto free_dst; err = udp_tunnel_handle_offloads(skb, udp_sum); if (err) goto free_dst; daddr = info->key.u.ipv6.dst; udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev, &saddr, &daddr, prio, ttl, info->key.label, sport, bareudp->port, !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags)); return 0; free_dst: dst_release(dst); return err; } static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto) { if (bareudp->ethertype == proto) return true; if (!bareudp->multi_proto_mode) return false; if (bareudp->ethertype == htons(ETH_P_MPLS_UC) && proto == htons(ETH_P_MPLS_MC)) return true; if (bareudp->ethertype == htons(ETH_P_IP) && proto == htons(ETH_P_IPV6)) return true; return false; } static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; if (!bareudp_proto_valid(bareudp, skb->protocol)) { err = -EINVAL; goto tx_error; } info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { err = -EINVAL; goto tx_error; } rcu_read_lock(); if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6) err = bareudp6_xmit_skb(skb, dev, bareudp, info); else err = bareudp_xmit_skb(skb, dev, bareudp, info); rcu_read_unlock(); if (likely(!err)) return NETDEV_TX_OK; tx_error: dev_kfree_skb(skb); if (err == -ELOOP) DEV_STATS_INC(dev, collisions); else if (err == -ENETUNREACH) DEV_STATS_INC(dev, tx_carrier_errors); DEV_STATS_INC(dev, tx_errors); return NETDEV_TX_OK; } static int bareudp_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct bareudp_dev *bareudp = netdev_priv(dev); bool use_cache; __be16 sport; use_cache = ip_tunnel_dst_cache_usable(skb, info); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, true); if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key, sport, bareudp->port, info->key.tos, use_cache ? &info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); info->key.u.ipv4.src = saddr; } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct in6_addr saddr; struct socket *sock = rcu_dereference(bareudp->sock); dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr, &info->key, sport, bareudp->port, info->key.tos, use_cache ? 
&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); dst_release(dst); info->key.u.ipv6.src = saddr; } else { return -EINVAL; } info->key.tp_src = sport; info->key.tp_dst = bareudp->port; return 0; } static const struct net_device_ops bareudp_netdev_ops = { .ndo_init = bareudp_init, .ndo_uninit = bareudp_uninit, .ndo_open = bareudp_open, .ndo_stop = bareudp_stop, .ndo_start_xmit = bareudp_xmit, .ndo_fill_metadata_dst = bareudp_fill_metadata_dst, }; static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = { [IFLA_BAREUDP_PORT] = { .type = NLA_U16 }, [IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 }, [IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 }, [IFLA_BAREUDP_MULTIPROTO_MODE] = { .type = NLA_FLAG }, }; /* Info for udev, that this is a virtual tunnel endpoint */ static const struct device_type bareudp_type = { .name = "bareudp", }; /* Initialize the device structure. */ static void bareudp_setup(struct net_device *dev) { dev->netdev_ops = &bareudp_netdev_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &bareudp_type); dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->features |= NETIF_F_RXCSUM; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = ETH_DATA_LEN; dev->min_mtu = IPV4_MIN_MTU; dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN; dev->type = ARPHRD_NONE; netif_keep_dst(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->lltx = true; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; } static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (!data) { NL_SET_ERR_MSG(extack, "Not enough attributes provided to perform the operation"); return -EINVAL; } return 0; } static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, struct netlink_ext_ack *extack) { memset(conf, 0, sizeof(*conf)); if (!data[IFLA_BAREUDP_PORT]) { NL_SET_ERR_MSG(extack, "port not specified"); return -EINVAL; } if (!data[IFLA_BAREUDP_ETHERTYPE]) { NL_SET_ERR_MSG(extack, "ethertype not specified"); return -EINVAL; } conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]); conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]); if (data[IFLA_BAREUDP_SRCPORT_MIN]) conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]); if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; return 0; } static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn, const struct bareudp_conf *conf) { struct bareudp_dev *bareudp, *t = NULL; list_for_each_entry(bareudp, &bn->bareudp_list, next) { if (conf->port == bareudp->port) t = bareudp; } return t; } static int bareudp_configure(struct net *net, struct net_device *dev, struct bareudp_conf *conf, struct netlink_ext_ack *extack) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); struct bareudp_dev *t, *bareudp = netdev_priv(dev); int err; bareudp->net = net; bareudp->dev = dev; t = bareudp_find_dev(bn, conf); if (t) { NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists"); return -EBUSY; } if (conf->multi_proto_mode && (conf->ethertype != htons(ETH_P_MPLS_UC) && conf->ethertype != htons(ETH_P_IP))) { NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)"); return -EINVAL; } bareudp->port = conf->port; 
bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; err = register_netdevice(dev); if (err) return err; list_add(&bareudp->next, &bn->bareudp_list); return 0; } static int bareudp_link_config(struct net_device *dev, struct nlattr *tb[]) { int err; if (tb[IFLA_MTU]) { err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); if (err) return err; } return 0; } static void bareudp_dellink(struct net_device *dev, struct list_head *head) { struct bareudp_dev *bareudp = netdev_priv(dev); list_del(&bareudp->next); unregister_netdevice_queue(dev, head); } static int bareudp_newlink(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net *link_net = rtnl_newlink_link_net(params); struct nlattr **data = params->data; struct nlattr **tb = params->tb; struct bareudp_conf conf; int err; err = bareudp2info(data, &conf, extack); if (err) return err; err = bareudp_configure(link_net, dev, &conf, extack); if (err) return err; err = bareudp_link_config(dev, tb); if (err) goto err_unconfig; return 0; err_unconfig: bareudp_dellink(dev, NULL); return err; } static size_t bareudp_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */ nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ 0; } static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port)) goto nla_put_failure; if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype)) goto nla_put_failure; if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min)) goto nla_put_failure; if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops bareudp_link_ops __read_mostly = { .kind = "bareudp", .maxtype = IFLA_BAREUDP_MAX, .policy = bareudp_policy, .priv_size = sizeof(struct bareudp_dev), .setup = bareudp_setup, .validate = bareudp_validate, .newlink = bareudp_newlink, .dellink = bareudp_dellink, .get_size = bareudp_get_size, .fill_info = bareudp_fill_info, }; static __net_init int bareudp_init_net(struct net *net) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); INIT_LIST_HEAD(&bn->bareudp_list); return 0; } static void bareudp_destroy_tunnels(struct net *net, struct list_head *head) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); struct bareudp_dev *bareudp, *next; list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next) unregister_netdevice_queue(bareudp->dev, head); } static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list, struct list_head *dev_kill_list) { struct net *net; list_for_each_entry(net, net_list, exit_list) bareudp_destroy_tunnels(net, dev_kill_list); } static struct pernet_operations bareudp_net_ops = { .init = bareudp_init_net, .exit_batch_rtnl = bareudp_exit_batch_rtnl, .id = &bareudp_net_id, .size = sizeof(struct bareudp_net), }; static int __init bareudp_init_module(void) { int rc; rc = register_pernet_subsys(&bareudp_net_ops); if (rc) goto out1; rc = rtnl_link_register(&bareudp_link_ops); if (rc) goto out2; return 0; out2: unregister_pernet_subsys(&bareudp_net_ops); out1: return rc; } late_initcall(bareudp_init_module); static void __exit 
bareudp_cleanup_module(void) { rtnl_link_unregister(&bareudp_link_ops); unregister_pernet_subsys(&bareudp_net_ops); } module_exit(bareudp_cleanup_module); MODULE_ALIAS_RTNL_LINK("bareudp"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>"); MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
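/*
 * Illustrative sketch (not part of the driver): the struct bareudp_conf that
 * bareudp2info() above would produce for a typical MPLS-over-UDP device
 * created over netlink with IFLA_BAREUDP_PORT, IFLA_BAREUDP_ETHERTYPE and
 * IFLA_BAREUDP_MULTIPROTO_MODE. The concrete values (UDP port 6635, MPLS
 * unicast ethertype) are examples only.
 */
static const struct bareudp_conf example_mpls_conf = {
	.ethertype        = htons(ETH_P_MPLS_UC), /* 0x8847, the "bare" inner protocol */
	.port             = htons(6635),          /* destination UDP port */
	.sport_min        = 0,                    /* lower bound for udp_flow_src_port() */
	.multi_proto_mode = true,                 /* also accept ETH_P_MPLS_MC payloads,
						   * see bareudp_proto_valid() above */
};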
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2018 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING.
*/ #include "fuse_i.h" #include <linux/iversion.h> #include <linux/posix_acl.h> #include <linux/pagemap.h> #include <linux/highmem.h> static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx) { struct fuse_conn *fc = get_fuse_conn(dir); struct fuse_inode *fi = get_fuse_inode(dir); if (!fc->do_readdirplus) return false; if (!fc->readdirplus_auto) return true; if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state)) return true; if (ctx->pos == 0) return true; return false; } static void fuse_add_dirent_to_cache(struct file *file, struct fuse_dirent *dirent, loff_t pos) { struct fuse_inode *fi = get_fuse_inode(file_inode(file)); size_t reclen = FUSE_DIRENT_SIZE(dirent); pgoff_t index; struct page *page; loff_t size; u64 version; unsigned int offset; void *addr; spin_lock(&fi->rdc.lock); /* * Is cache already completed? Or this entry does not go at the end of * cache? */ if (fi->rdc.cached || pos != fi->rdc.pos) { spin_unlock(&fi->rdc.lock); return; } version = fi->rdc.version; size = fi->rdc.size; offset = size & ~PAGE_MASK; index = size >> PAGE_SHIFT; /* Dirent doesn't fit in current page? Jump to next page. */ if (offset + reclen > PAGE_SIZE) { index++; offset = 0; } spin_unlock(&fi->rdc.lock); if (offset) { page = find_lock_page(file->f_mapping, index); } else { page = find_or_create_page(file->f_mapping, index, mapping_gfp_mask(file->f_mapping)); } if (!page) return; spin_lock(&fi->rdc.lock); /* Raced with another readdir */ if (fi->rdc.version != version || fi->rdc.size != size || WARN_ON(fi->rdc.pos != pos)) goto unlock; addr = kmap_local_page(page); if (!offset) { clear_page(addr); SetPageUptodate(page); } memcpy(addr + offset, dirent, reclen); kunmap_local(addr); fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen; fi->rdc.pos = dirent->off; unlock: spin_unlock(&fi->rdc.lock); unlock_page(page); put_page(page); } static void fuse_readdir_cache_end(struct file *file, loff_t pos) { struct fuse_inode *fi = get_fuse_inode(file_inode(file)); loff_t end; spin_lock(&fi->rdc.lock); /* does cache end position match current position? 
*/ if (fi->rdc.pos != pos) { spin_unlock(&fi->rdc.lock); return; } fi->rdc.cached = true; end = ALIGN(fi->rdc.size, PAGE_SIZE); spin_unlock(&fi->rdc.lock); /* truncate unused tail of cache */ truncate_inode_pages(file->f_mapping, end); } static bool fuse_emit(struct file *file, struct dir_context *ctx, struct fuse_dirent *dirent) { struct fuse_file *ff = file->private_data; if (ff->open_flags & FOPEN_CACHE_DIR) fuse_add_dirent_to_cache(file, dirent, ctx->pos); return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino, dirent->type); } static int parse_dirfile(char *buf, size_t nbytes, struct file *file, struct dir_context *ctx) { while (nbytes >= FUSE_NAME_OFFSET) { struct fuse_dirent *dirent = (struct fuse_dirent *) buf; size_t reclen = FUSE_DIRENT_SIZE(dirent); if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) return -EIO; if (reclen > nbytes) break; if (memchr(dirent->name, '/', dirent->namelen) != NULL) return -EIO; if (!fuse_emit(file, ctx, dirent)) break; buf += reclen; nbytes -= reclen; ctx->pos = dirent->off; } return 0; } static int fuse_direntplus_link(struct file *file, struct fuse_direntplus *direntplus, u64 attr_version, u64 evict_ctr) { struct fuse_entry_out *o = &direntplus->entry_out; struct fuse_dirent *dirent = &direntplus->dirent; struct dentry *parent = file->f_path.dentry; struct qstr name = QSTR_INIT(dirent->name, dirent->namelen); struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); if (!o->nodeid) { /* * Unlike in the case of fuse_lookup, zero nodeid does not mean * ENOENT. Instead, it only means the userspace filesystem did * not want to return attributes/handle for this entry. * * So do nothing. */ return 0; } if (name.name[0] == '.') { /* * We could potentially refresh the attributes of the directory * and its parent? */ if (name.len == 1) return 0; if (name.name[1] == '.' 
&& name.len == 2) return 0; } if (invalid_nodeid(o->nodeid)) return -EIO; if (fuse_invalid_attr(&o->attr)) return -EIO; fc = get_fuse_conn(dir); name.hash = full_name_hash(parent, name.name, name.len); dentry = d_lookup(parent, &name); if (!dentry) { retry: dentry = d_alloc_parallel(parent, &name, &wq); if (IS_ERR(dentry)) return PTR_ERR(dentry); } if (!d_in_lookup(dentry)) { struct fuse_inode *fi; inode = d_inode(dentry); if (inode && get_node_id(inode) != o->nodeid) inode = NULL; if (!inode || fuse_stale_inode(inode, o->generation, &o->attr)) { if (inode) fuse_make_bad(inode); d_invalidate(dentry); dput(dentry); goto retry; } if (fuse_is_bad(inode)) { dput(dentry); return -EIO; } fi = get_fuse_inode(inode); spin_lock(&fi->lock); fi->nlookup++; spin_unlock(&fi->lock); forget_all_cached_acls(inode); fuse_change_attributes(inode, &o->attr, NULL, ATTR_TIMEOUT(o), attr_version); /* * The other branch comes via fuse_iget() * which bumps nlookup inside */ } else { inode = fuse_iget(dir->i_sb, o->nodeid, o->generation, &o->attr, ATTR_TIMEOUT(o), attr_version, evict_ctr); if (!inode) inode = ERR_PTR(-ENOMEM); alias = d_splice_alias(inode, dentry); d_lookup_done(dentry); if (alias) { dput(dentry); dentry = alias; } if (IS_ERR(dentry)) { if (!IS_ERR(inode)) { struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fi->lock); fi->nlookup--; spin_unlock(&fi->lock); } return PTR_ERR(dentry); } } if (fc->readdirplus_auto) set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state); fuse_change_entry_timeout(dentry, o); dput(dentry); return 0; } static void fuse_force_forget(struct file *file, u64 nodeid) { struct inode *inode = file_inode(file); struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_forget_in inarg; FUSE_ARGS(args); memset(&inarg, 0, sizeof(inarg)); inarg.nlookup = 1; args.opcode = FUSE_FORGET; args.nodeid = nodeid; args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.force = true; args.noreply = true; fuse_simple_request(fm, &args); /* ignore errors */ } static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, struct dir_context *ctx, u64 attr_version, u64 evict_ctr) { struct fuse_direntplus *direntplus; struct fuse_dirent *dirent; size_t reclen; int over = 0; int ret; while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) { direntplus = (struct fuse_direntplus *) buf; dirent = &direntplus->dirent; reclen = FUSE_DIRENTPLUS_SIZE(direntplus); if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) return -EIO; if (reclen > nbytes) break; if (memchr(dirent->name, '/', dirent->namelen) != NULL) return -EIO; if (!over) { /* We fill entries into dstbuf only as much as it can hold. But we still continue iterating over remaining entries to link them. If not, we need to send a FORGET for each of those which we did not link. 
*/ over = !fuse_emit(file, ctx, dirent); if (!over) ctx->pos = dirent->off; } buf += reclen; nbytes -= reclen; ret = fuse_direntplus_link(file, direntplus, attr_version, evict_ctr); if (ret) fuse_force_forget(file, direntplus->entry_out.nodeid); } return 0; } static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) { int plus; ssize_t res; struct folio *folio; struct inode *inode = file_inode(file); struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_io_args ia = {}; struct fuse_args_pages *ap = &ia.ap; struct fuse_folio_desc desc = { .length = PAGE_SIZE }; u64 attr_version = 0, evict_ctr = 0; bool locked; folio = folio_alloc(GFP_KERNEL, 0); if (!folio) return -ENOMEM; plus = fuse_use_readdirplus(inode, ctx); ap->args.out_pages = true; ap->num_folios = 1; ap->folios = &folio; ap->descs = &desc; if (plus) { attr_version = fuse_get_attr_version(fm->fc); evict_ctr = fuse_get_evict_ctr(fm->fc); fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE, FUSE_READDIRPLUS); } else { fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE, FUSE_READDIR); } locked = fuse_lock_inode(inode); res = fuse_simple_request(fm, &ap->args); fuse_unlock_inode(inode, locked); if (res >= 0) { if (!res) { struct fuse_file *ff = file->private_data; if (ff->open_flags & FOPEN_CACHE_DIR) fuse_readdir_cache_end(file, ctx->pos); } else if (plus) { res = parse_dirplusfile(folio_address(folio), res, file, ctx, attr_version, evict_ctr); } else { res = parse_dirfile(folio_address(folio), res, file, ctx); } } folio_put(folio); fuse_invalidate_atime(inode); return res; } enum fuse_parse_result { FOUND_ERR = -1, FOUND_NONE = 0, FOUND_SOME, FOUND_ALL, }; static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff, void *addr, unsigned int size, struct dir_context *ctx) { unsigned int offset = ff->readdir.cache_off & ~PAGE_MASK; enum fuse_parse_result res = FOUND_NONE; WARN_ON(offset >= size); for (;;) { struct fuse_dirent *dirent = addr + offset; unsigned int nbytes = size - offset; size_t reclen; if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen) break; reclen = FUSE_DIRENT_SIZE(dirent); /* derefs ->namelen */ if (WARN_ON(dirent->namelen > FUSE_NAME_MAX)) return FOUND_ERR; if (WARN_ON(reclen > nbytes)) return FOUND_ERR; if (WARN_ON(memchr(dirent->name, '/', dirent->namelen) != NULL)) return FOUND_ERR; if (ff->readdir.pos == ctx->pos) { res = FOUND_SOME; if (!dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino, dirent->type)) return FOUND_ALL; ctx->pos = dirent->off; } ff->readdir.pos = dirent->off; ff->readdir.cache_off += reclen; offset += reclen; } return res; } static void fuse_rdc_reset(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); fi->rdc.cached = false; fi->rdc.version++; fi->rdc.size = 0; fi->rdc.pos = 0; } #define UNCACHED 1 static int fuse_readdir_cached(struct file *file, struct dir_context *ctx) { struct fuse_file *ff = file->private_data; struct inode *inode = file_inode(file); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); enum fuse_parse_result res; pgoff_t index; unsigned int size; struct page *page; void *addr; /* Seeked? If so, reset the cache stream */ if (ff->readdir.pos != ctx->pos) { ff->readdir.pos = 0; ff->readdir.cache_off = 0; } /* * We're just about to start reading into the cache or reading the * cache; both cases require an up-to-date mtime value. 
*/ if (!ctx->pos && fc->auto_inval_data) { int err = fuse_update_attributes(inode, file, STATX_MTIME); if (err) return err; } retry: spin_lock(&fi->rdc.lock); retry_locked: if (!fi->rdc.cached) { /* Starting cache? Set cache mtime. */ if (!ctx->pos && !fi->rdc.size) { fi->rdc.mtime = inode_get_mtime(inode); fi->rdc.iversion = inode_query_iversion(inode); } spin_unlock(&fi->rdc.lock); return UNCACHED; } /* * When at the beginning of the directory (i.e. just after opendir(3) or * rewinddir(3)), then need to check whether directory contents have * changed, and reset the cache if so. */ if (!ctx->pos) { struct timespec64 mtime = inode_get_mtime(inode); if (inode_peek_iversion(inode) != fi->rdc.iversion || !timespec64_equal(&fi->rdc.mtime, &mtime)) { fuse_rdc_reset(inode); goto retry_locked; } } /* * If cache version changed since the last getdents() call, then reset * the cache stream. */ if (ff->readdir.version != fi->rdc.version) { ff->readdir.pos = 0; ff->readdir.cache_off = 0; } /* * If at the beginning of the cache, than reset version to * current. */ if (ff->readdir.pos == 0) ff->readdir.version = fi->rdc.version; WARN_ON(fi->rdc.size < ff->readdir.cache_off); index = ff->readdir.cache_off >> PAGE_SHIFT; if (index == (fi->rdc.size >> PAGE_SHIFT)) size = fi->rdc.size & ~PAGE_MASK; else size = PAGE_SIZE; spin_unlock(&fi->rdc.lock); /* EOF? */ if ((ff->readdir.cache_off & ~PAGE_MASK) == size) return 0; page = find_get_page_flags(file->f_mapping, index, FGP_ACCESSED | FGP_LOCK); /* Page gone missing, then re-added to cache, but not initialized? */ if (page && !PageUptodate(page)) { unlock_page(page); put_page(page); page = NULL; } spin_lock(&fi->rdc.lock); if (!page) { /* * Uh-oh: page gone missing, cache is useless */ if (fi->rdc.version == ff->readdir.version) fuse_rdc_reset(inode); goto retry_locked; } /* Make sure it's still the same version after getting the page. */ if (ff->readdir.version != fi->rdc.version) { spin_unlock(&fi->rdc.lock); unlock_page(page); put_page(page); goto retry; } spin_unlock(&fi->rdc.lock); /* * Contents of the page are now protected against changing by holding * the page lock. */ addr = kmap_local_page(page); res = fuse_parse_cache(ff, addr, size, ctx); kunmap_local(addr); unlock_page(page); put_page(page); if (res == FOUND_ERR) return -EIO; if (res == FOUND_ALL) return 0; if (size == PAGE_SIZE) { /* We hit end of page: skip to next page. */ ff->readdir.cache_off = ALIGN(ff->readdir.cache_off, PAGE_SIZE); goto retry; } /* * End of cache reached. If found position, then we are done, otherwise * need to fall back to uncached, since the position we were looking for * wasn't in the cache. */ return res == FOUND_SOME ? 0 : UNCACHED; } int fuse_readdir(struct file *file, struct dir_context *ctx) { struct fuse_file *ff = file->private_data; struct inode *inode = file_inode(file); int err; if (fuse_is_bad(inode)) return -EIO; err = UNCACHED; if (ff->open_flags & FOPEN_CACHE_DIR) err = fuse_readdir_cached(file, ctx); if (err == UNCACHED) err = fuse_readdir_uncached(file, ctx); return err; } |
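/*
 * Worked size example (illustrative, not part of the original file): the
 * record-length arithmetic used by parse_dirfile() and
 * fuse_add_dirent_to_cache() above. Assuming the usual uapi layout of
 * struct fuse_dirent (two 64-bit fields, two 32-bit fields, then the name),
 * FUSE_NAME_OFFSET is 24 and FUSE_DIRENT_SIZE() rounds up to an 8-byte
 * boundary:
 *
 *   name "hello" (namelen = 5)
 *   FUSE_DIRENT_SIZE(d) = ALIGN(FUSE_NAME_OFFSET + 5, 8) = ALIGN(29, 8) = 32
 *
 * so parse_dirfile() advances buf by 32 bytes for this entry, and
 * fuse_add_dirent_to_cache() appends those 32 bytes at the current cache
 * offset, jumping to the next page when offset + 32 would cross PAGE_SIZE.
 */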
/* SPDX-License-Identifier: GPL-2.0 */ /* * PHY device list allows maintaining a list of PHY devices that are * part of a netdevice's link topology. PHYs can for example be chained, * as is the case when using a PHY that exposes an SFP module, on which an * SFP transceiver that embeds a PHY is connected. * * This list can then be used by userspace to leverage individual PHY * capabilities. */ #ifndef __PHY_LINK_TOPOLOGY_H #define __PHY_LINK_TOPOLOGY_H #include <linux/ethtool.h> #include <linux/netdevice.h> struct xarray; struct phy_device; struct sfp_bus; struct phy_link_topology { struct xarray phys; u32 next_phy_index; }; struct phy_device_node { enum phy_upstream upstream_type; union { struct net_device *netdev; struct phy_device *phydev; } upstream; struct sfp_bus *parent_sfp_bus; struct phy_device *phy; }; #if IS_ENABLED(CONFIG_PHYLIB) int phy_link_topo_add_phy(struct net_device *dev, struct phy_device *phy, enum phy_upstream upt, void *upstream); void phy_link_topo_del_phy(struct net_device *dev, struct phy_device *phy); static inline struct phy_device * phy_link_topo_get_phy(struct net_device *dev, u32 phyindex) { struct phy_link_topology *topo = dev->link_topo; struct phy_device_node *pdn; if (!topo) return NULL; pdn = xa_load(&topo->phys, phyindex); if (pdn) return pdn->phy; return NULL; } #else static inline int phy_link_topo_add_phy(struct net_device *dev, struct phy_device *phy, enum phy_upstream upt, void *upstream) { return 0; } static inline void phy_link_topo_del_phy(struct net_device *dev, struct phy_device *phy) { } static inline struct phy_device * phy_link_topo_get_phy(struct net_device *dev, u32 phyindex) { return NULL; } #endif #endif /* __PHY_LINK_TOPOLOGY_H */
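/*
 * Minimal usage sketch (illustrative): resolving a PHY index received from
 * userspace back to its phy_device via the lookup helper declared above.
 * Only phy_link_topo_get_phy() comes from this header; the function name and
 * error handling here are hypothetical.
 */
static int example_lookup_phy(struct net_device *dev, u32 phyindex)
{
	struct phy_device *phydev = phy_link_topo_get_phy(dev, phyindex);

	if (!phydev)
		return -ENODEV;	/* unknown index, or PHYLIB not enabled */

	/* ... interrogate or configure phydev here ... */
	return 0;
}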
// SPDX-License-Identifier: GPL-2.0-only /* Linux driver for devices based on the DiBcom DiB0700 USB bridge * * Copyright (C) 2005-9 DiBcom, SA et al */ #include "dib0700.h" #include "dib3000mc.h" #include "dib7000m.h" #include "dib7000p.h" #include "dib8000.h" #include "dib9000.h" #include "mt2060.h" #include "mt2266.h" #include "xc2028.h" #include "xc5000.h" #include "xc4000.h" #include "s5h1411.h" #include "dib0070.h" #include "dib0090.h" #include "lgdt3305.h" #include "mxl5007t.h" #include "mn88472.h" #include "tda18250.h" static int force_lna_activation; module_param(force_lna_activation, int, 0644); MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifier(s) (LNA), if applicable for the device (default: 0=automatic/off)."); struct dib0700_adapter_state { int (*set_param_save) (struct dvb_frontend *); const struct firmware *frontend_firmware; struct dib7000p_ops dib7000p_ops; struct dib8000_ops dib8000_ops; }; /* Hauppauge Nova-T 500 (aka Bristol) * has a LNA on GPIO0 which is enabled by setting 1 */ static struct mt2060_config bristol_mt2060_config[2] = { { .i2c_address = 0x60, .clock_out = 3, }, { .i2c_address = 0x61, } }; static struct dibx000_agc_config bristol_dib3000p_mt2060_agc_config = { .band_caps = BAND_VHF | BAND_UHF, .setup = (1 << 8) | (5 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (2 << 0), .agc1_max = 42598, .agc1_min = 17694, .agc2_max = 45875, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 59, .agc1_slope1 = 0, .agc1_slope2 = 69, .agc2_pt1 = 0, .agc2_pt2 = 59, .agc2_slope1 = 111, .agc2_slope2 = 28, }; static struct dib3000mc_config bristol_dib3000mc_config[2] = { { .agc = &bristol_dib3000p_mt2060_agc_config, .max_time = 0x196, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, }, { .agc = &bristol_dib3000p_mt2060_agc_config, .max_time = 0x196, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, } }; static int bristol_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; if (adap->id == 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); if (force_lna_activation) dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); else dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 0); if (dib3000mc_i2c_enumeration(&adap->dev->i2c_adap, 2, DEFAULT_DIB3000P_I2C_ADDRESS, bristol_dib3000mc_config) != 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(10); return -ENODEV; } } st->mt2060_if1[adap->id] = 1220; return (adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, (10 + adap->id) << 1, &bristol_dib3000mc_config[adap->id])) == NULL ?
-ENODEV : 0; } static int eeprom_read(struct i2c_adapter *adap,u8 adrs,u8 *pval) { struct i2c_msg msg[2] = { { .addr = 0x50, .flags = 0, .buf = &adrs, .len = 1 }, { .addr = 0x50, .flags = I2C_M_RD, .buf = pval, .len = 1 }, }; if (i2c_transfer(adap, msg, 2) != 2) return -EREMOTEIO; return 0; } static int bristol_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *prim_i2c = &adap->dev->i2c_adap; struct i2c_adapter *tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1); s8 a; int if1=1220; if (adap->dev->udev->descriptor.idVendor == cpu_to_le16(USB_VID_HAUPPAUGE) && adap->dev->udev->descriptor.idProduct == cpu_to_le16(USB_PID_HAUPPAUGE_NOVA_T_500_2)) { if (!eeprom_read(prim_i2c,0x59 + adap->id,&a)) if1=1220+a; } return dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &bristol_mt2060_config[adap->id], if1) == NULL ? -ENODEV : 0; } /* STK7700D: Pinnacle/Terratec/Hauppauge Dual DVB-T Diversity */ /* MT226x */ static struct dibx000_agc_config stk7700d_7000p_mt2266_agc_config[2] = { { BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), 1130, 21, 0, 118, 0, 3530, 1, 0, 65535, 33770, 65535, 23592, 0, 62, 255, 64, 64, 132, 192, 80, 80, 17, 27, 23, 51, 1, }, { BAND_VHF | BAND_LBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), 2372, 21, 0, 118, 0, 3530, 1, 0, 65535, 0, 65535, 23592, 0, 128, 128, 128, 0, 128, 253, 81, 0, 17, 27, 23, 51, 1, } }; static struct dibx000_bandwidth_config stk7700d_mt2266_pll_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = 0, .timf = 20452225, }; static struct dib7000p_config stk7700d_dib7000p_mt2266_config[] = { { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .agc_config_count = 2, .agc = stk7700d_7000p_mt2266_agc_config, .bw = &stk7700d_mt2266_pll_config, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, }, { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .agc_config_count = 2, .agc = stk7700d_7000p_mt2266_agc_config, .bw = &stk7700d_mt2266_pll_config, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, } }; static struct mt2266_config stk7700d_mt2266_config[2] = { { .i2c_address = 0x60 }, { .i2c_address = 0x60 } }; static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (adap->id == 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 
0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80 + (adap->id << 1), &stk7700d_dib7000p_mt2266_config[adap->id]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (adap->id == 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 2, 18, stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80 + (adap->id << 1), &stk7700d_dib7000p_mt2266_config[adap->id]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); return dvb_attach(mt2266_attach, adap->fe_adap[0].fe, tun_i2c, &stk7700d_mt2266_config[adap->id]) == NULL ? -ENODEV : 0; } /* STK7700-PH: Digital/Analog Hybrid Tuner, e.h. 
Cinergy HT USB HE */ static struct dibx000_agc_config xc3028_agc_config = { .band_caps = BAND_VHF | BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (0 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), .inv_gain = 712, .time_stabiliz = 21, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 2867, .wbd_sel = 0, .wbd_alpha = 2, .agc1_max = 0, .agc1_min = 0, .agc2_max = 39718, .agc2_min = 9930, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 0, .agc1_slope1 = 0, .agc1_slope2 = 0, .agc2_pt1 = 0, .agc2_pt2 = 128, .agc2_slope1 = 29, .agc2_slope2 = 29, .alpha_mant = 17, .alpha_exp = 27, .beta_mant = 23, .beta_exp = 51, .perform_agc_softsplit = 1, }; /* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */ static struct dibx000_bandwidth_config xc3028_bw_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 0, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), /* sad_cfg: refsel, sel, freq_15k */ .ifreq = (1 << 25) | 5816102, /* ifreq = 5.200000 MHz */ .timf = 20452225, .xtal_hz = 30000000, }; static struct dib7000p_config stk7700ph_dib7700_xc3028_config = { .output_mpeg2_in_188_bytes = 1, .tuner_is_baseband = 1, .agc_config_count = 1, .agc = &xc3028_agc_config, .bw = &xc3028_bw_config, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, }; static int stk7700ph_xc3028_callback(void *ptr, int component, int command, int arg) { struct dvb_usb_adapter *adap = ptr; struct dib0700_adapter_state *state = adap->priv; switch (command) { case XC2028_TUNER_RESET: /* Send the tuner in then out of reset */ state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 0); msleep(10); state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); break; case XC2028_RESET_CLK: case XC2028_I2C_FLUSH: break; default: err("%s: unknown command %d, arg %d\n", __func__, command, arg); return -EINVAL; } return 0; } static struct xc2028_ctrl stk7700ph_xc3028_ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, .demod = XC3028_FE_DIBCOM52, }; static struct xc2028_config stk7700ph_xc3028_config = { .i2c_addr = 0x61, .ctrl = &stk7700ph_xc3028_ctrl, }; static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) { struct usb_device_descriptor *desc = &adap->dev->udev->descriptor; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (desc->idVendor == cpu_to_le16(USB_VID_PINNACLE) && desc->idProduct == cpu_to_le16(USB_PID_PINNACLE_EXPRESSCARD_320CX)) dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); else dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); msleep(10); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &stk7700ph_dib7700_xc3028_config); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); stk7700ph_xc3028_config.i2c_adap = tun_i2c; /* FIXME: generalize & move to common area */ adap->fe_adap[0].fe->callback = stk7700ph_xc3028_callback; return dvb_attach(xc2028_attach, adap->fe_adap[0].fe, &stk7700ph_xc3028_config) == NULL ? -ENODEV : 0; } #define DEFAULT_RC_INTERVAL 50 /* * This function is used only when firmware is < 1.20 version. Newer * firmwares use bulk mode, with functions implemented at dib0700_core, * at dib0700_rc_urb_completion() */ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) { enum rc_proto protocol; u32 scancode; u8 toggle; int i; struct dib0700_state *st = d->priv; if (st->fw_version >= 0x10200) { /* For 1.20 firmware , We need to keep the RC polling callback so we can reuse the input device setup in dvb-usb-remote.c. However, the actual work is being done in the bulk URB completion handler. */ return 0; } st->buf[0] = REQUEST_POLL_RC; st->buf[1] = 0; i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4); if (i <= 0) { err("RC Query Failed"); return -EIO; } /* losing half of KEY_0 events from Philipps rc5 remotes.. */ if (st->buf[0] == 0 && st->buf[1] == 0 && st->buf[2] == 0 && st->buf[3] == 0) return 0; /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */ dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ switch (d->props.rc.core.protocol) { case RC_PROTO_BIT_NEC: /* NEC protocol sends repeat code as 0 0 0 FF */ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) && (st->buf[3] == 0xff)) { rc_repeat(d->rc_dev); return 0; } protocol = RC_PROTO_NEC; scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]); toggle = 0; break; default: /* RC-5 protocol changes toggle bit on new keypress */ protocol = RC_PROTO_RC5; scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]); toggle = st->buf[3 - 1]; break; } rc_keydown(d->rc_dev, protocol, scancode, toggle); return 0; } /* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */ static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = { BAND_UHF | BAND_VHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), 712, 41, 0, 118, 0, 4095, 0, 0, 42598, 17694, 45875, 2621, 0, 76, 139, 52, 59, 107, 172, 57, 70, 21, 25, 28, 48, 1, { 0, 107, 51800, 24700 }, }; static struct dibx000_agc_config stk7700p_7000p_mt2060_agc_config = { .band_caps = BAND_UHF | BAND_VHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), .inv_gain = 712, .time_stabiliz = 41, .alpha_level = 0, .thlock = 118, 
.wbd_inv = 0, .wbd_ref = 4095, .wbd_sel = 0, .wbd_alpha = 0, .agc1_max = 42598, .agc1_min = 16384, .agc2_max = 42598, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 137, .agc1_pt3 = 255, .agc1_slope1 = 0, .agc1_slope2 = 255, .agc2_pt1 = 0, .agc2_pt2 = 0, .agc2_slope1 = 0, .agc2_slope2 = 41, .alpha_mant = 15, .alpha_exp = 25, .beta_mant = 28, .beta_exp = 48, .perform_agc_softsplit = 0, }; static struct dibx000_bandwidth_config stk7700p_pll_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 0, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = 60258167, .timf = 20452225, .xtal_hz = 30000000, }; static struct dib7000m_config stk7700p_dib7000m_config = { .dvbt_mode = 1, .output_mpeg2_in_188_bytes = 1, .quartz_direct = 1, .agc_config_count = 1, .agc = &stk7700p_7000m_mt2060_agc_config, .bw = &stk7700p_pll_config, .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS, }; static struct dib7000p_config stk7700p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &stk7700p_7000p_mt2060_agc_config, .bw = &stk7700p_pll_config, .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS, }; static int stk7700p_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* unless there is no real power management in DVB - we leave the device on GPIO6 */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(50); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_ctrl_clock(adap->dev, 72, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(100); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); st->mt2060_if1[0] = 1220; if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap)) { adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 18, &stk7700p_dib7000p_config); st->is_dib7000pc = 1; } else { memset(&state->dib7000p_ops, 0, sizeof(state->dib7000p_ops)); adap->fe_adap[0].fe = dvb_attach(dib7000m_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000m_config); } return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static struct mt2060_config stk7700p_mt2060_config = { 0x60 }; static int stk7700p_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *prim_i2c = &adap->dev->i2c_adap; struct dib0700_state *st = adap->dev->priv; struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; s8 a; int if1=1220; if (adap->dev->udev->descriptor.idVendor == cpu_to_le16(USB_VID_HAUPPAUGE) && adap->dev->udev->descriptor.idProduct == cpu_to_le16(USB_PID_HAUPPAUGE_NOVA_T_STICK)) { if (!eeprom_read(prim_i2c,0x58,&a)) if1=1220+a; } if (st->is_dib7000pc) tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); else tun_i2c = dib7000m_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); return dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk7700p_mt2060_config, if1) == NULL ? 
-ENODEV : 0; } /* DIB7070 generic */ static struct dibx000_agc_config dib7070_agc_config = { .band_caps = BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 600, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 40, .agc1_pt3 = 183, .agc1_slope1 = 206, .agc1_slope2 = 255, .agc2_pt1 = 72, .agc2_pt2 = 152, .agc2_slope1 = 88, .agc2_slope2 = 90, .alpha_mant = 17, .alpha_exp = 27, .beta_mant = 23, .beta_exp = 51, .perform_agc_softsplit = 0, }; static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("reset: %d", onoff); return state->dib7000p_ops.set_gpio(fe, 8, 0, !onoff); } static int dib7070_tuner_sleep(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("sleep: %d", onoff); return state->dib7000p_ops.set_gpio(fe, 9, 0, onoff); } static struct dib0070_config dib7070p_dib0070_config[2] = { { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 4, .charge_pump = 2, }, { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, .charge_pump = 2, } }; static struct dib0070_config dib7770p_dib0070_config = { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 0, .flip_chip = 1, .charge_pump = 2, }; static int dib7070_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset; u8 band = BAND_OF_FREQUENCY(p->frequency/1000); switch (band) { case BAND_VHF: offset = 950; break; case BAND_UHF: default: offset = 550; break; } deb_info("WBD for DiB7000P: %d\n", offset + dib0070_wbd_offset(fe)); state->dib7000p_ops.set_wbd_ref(fe, offset + dib0070_wbd_offset(fe)); return state->set_param_save(fe); } static int dib7770_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset; u8 band = BAND_OF_FREQUENCY(p->frequency/1000); switch (band) { case BAND_VHF: state->dib7000p_ops.set_gpio(fe, 0, 0, 1); offset = 850; break; case BAND_UHF: default: state->dib7000p_ops.set_gpio(fe, 0, 0, 0); offset = 250; break; } deb_info("WBD for DiB7000P: %d\n", offset + dib0070_wbd_offset(fe)); state->dib7000p_ops.set_wbd_ref(fe, offset + dib0070_wbd_offset(fe)); return state->set_param_save(fe); } static int dib7770p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, 
&dib7770p_dib0070_config) == NULL) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7770_set_param_override; return 0; } static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (adap->id == 0) { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config[0]) == NULL) return -ENODEV; } else { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config[1]) == NULL) return -ENODEV; } st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7070_set_param_override; return 0; } static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { struct dib0700_adapter_state *state = adapter->priv; struct dib0700_state *st = adapter->dev->priv; if (st->is_dib7000pc) return state->dib7000p_ops.pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); return dib7000m_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { struct dib0700_state *st = adapter->dev->priv; struct dib0700_adapter_state *state = adapter->priv; if (st->is_dib7000pc) return state->dib7000p_ops.pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); return dib7000m_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib7000p_ops.pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int stk70x0p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib7000p_ops.pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 20, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20452225, .xtal_hz = 12000000, }; static struct dib7000p_config dib7070p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, }; /* STK7070P */ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) { struct usb_device_descriptor *p = &adap->dev->udev->descriptor; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); else dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, 
GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &dib7070p_dib7000p_config); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } /* STK7770P */ static struct dib7000p_config dib7770p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .enable_current_mirror = 1, .disable_sample_and_hold = 0, }; static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) { struct usb_device_descriptor *p = &adap->dev->udev->descriptor; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); else dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &dib7770p_dib7000p_config); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } /* DIB807x generic */ static struct dibx000_agc_config dib807x_agc_config[2] = { { BAND_VHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, * P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, * P_agc_inv_pwm2=0,P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, * P_agc_write=0 */ (0 << 15) | (0 << 14) | (7 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), /* setup*/ 600, /* inv_gain*/ 10, /* time_stabiliz*/ 0, /* alpha_level*/ 118, /* thlock*/ 0, /* wbd_inv*/ 3530, /* wbd_ref*/ 1, /* wbd_sel*/ 5, /* wbd_alpha*/ 65535, /* agc1_max*/ 0, /* agc1_min*/ 65535, /* agc2_max*/ 0, /* agc2_min*/ 0, /* agc1_pt1*/ 40, /* agc1_pt2*/ 183, /* agc1_pt3*/ 206, /* agc1_slope1*/ 255, /* agc1_slope2*/ 72, /* agc2_pt1*/ 152, /* agc2_pt2*/ 88, /* agc2_slope1*/ 90, /* agc2_slope2*/ 17, /* alpha_mant*/ 27, /* alpha_exp*/ 23, /* beta_mant*/ 51, /* beta_exp*/ 0, /* perform_agc_softsplit*/ }, { BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, * P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, * P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, * P_agc_write=0 */ (0 << 15) | (0 << 14) | (1 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), /* setup */ 600, /* inv_gain*/ 10, /* time_stabiliz*/ 0, /* alpha_level*/ 118, /* thlock*/ 0, /* wbd_inv*/ 3530, /* wbd_ref*/ 1, /* wbd_sel*/ 5, /* wbd_alpha*/ 65535, /* agc1_max*/ 0, /* agc1_min*/ 65535, /* agc2_max*/ 0, /* agc2_min*/ 0, /* agc1_pt1*/ 40, /* agc1_pt2*/ 183, /* agc1_pt3*/ 206, /* agc1_slope1*/ 255, /* agc1_slope2*/ 72, /* agc2_pt1*/ 152, /* agc2_pt2*/ 88, /* agc2_slope1*/ 90, /* agc2_slope2*/ 17, /* alpha_mant*/ 27, /* alpha_exp*/ 23, /* beta_mant*/ 51, /* beta_exp*/ 0, /* perform_agc_softsplit*/ } }; static struct dibx000_bandwidth_config dib807x_bw_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 20, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (599 << 0), /* sad_cfg: refsel, sel, freq_15k*/ .ifreq = (0 << 25) | 0, /* ifreq = 0.000000 MHz*/ .timf = 18179755, .xtal_hz = 12000000, }; static struct dib8000_config dib807x_dib8000_config[2] = { { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib807x_agc_config, .pll = &dib807x_bw_config_12_mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .div_cfg = 1, .agc_control = &dib0070_ctrl_agc_filter, .output_mode = OUTMODE_MPEG2_FIFO, .drives = 0x2d98, }, { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib807x_agc_config, .pll = &dib807x_bw_config_12_mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .agc_control = &dib0070_ctrl_agc_filter, .output_mode = OUTMODE_MPEG2_FIFO, .drives = 0x2d98, } }; static int dib80xx_tuner_reset(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib8000_ops.set_gpio(fe, 5, 0, !onoff); } static int dib80xx_tuner_sleep(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib8000_ops.set_gpio(fe, 0, 0, onoff); } static 
const struct dib0070_wbd_gain_cfg dib8070_wbd_gain_cfg[] = { { 240, 7}, { 0xffff, 6}, }; static struct dib0070_config dib807x_dib0070_config[2] = { { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib80xx_tuner_reset, .sleep = dib80xx_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 4, .vga_filter = 1, .force_crystal_mode = 1, .enable_third_order_filter = 1, .charge_pump = 0, .wbd_gain = dib8070_wbd_gain_cfg, .osc_buffer_state = 0, .freq_offset_khz_uhf = -100, .freq_offset_khz_vhf = -100, }, { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib80xx_tuner_reset, .sleep = dib80xx_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 2, .vga_filter = 1, .force_crystal_mode = 1, .enable_third_order_filter = 1, .charge_pump = 0, .wbd_gain = dib8070_wbd_gain_cfg, .osc_buffer_state = 0, .freq_offset_khz_uhf = -25, .freq_offset_khz_vhf = -25, } }; static int dib807x_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset = dib0070_wbd_offset(fe); u8 band = BAND_OF_FREQUENCY(p->frequency/1000); switch (band) { case BAND_VHF: offset += 750; break; case BAND_UHF: /* fall-thru wanted */ default: offset += 250; break; } deb_info("WBD for DiB8000: %d\n", offset); state->dib8000_ops.set_wbd_ref(fe, offset); return state->set_param_save(fe); } static int dib807x_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (adap->id == 0) { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib807x_dib0070_config[0]) == NULL) return -ENODEV; } else { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib807x_dib0070_config[1]) == NULL) return -ENODEV; } st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib807x_set_param_override; return 0; } static int stk80xx_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib8000_ops.pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int stk80xx_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib8000_ops.pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } /* STK807x */ static int stk807x_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib807x_dib8000_config[0]); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } /* STK807xPVR */ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(500); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); /* initialize IC 0 */ state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib807x_dib8000_config[0]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; /* initialize IC 1 */ state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib807x_dib8000_config[1]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } /* STK8096GP */ static struct dibx000_agc_config dib8090_agc_config[2] = { { .band_caps = BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 787, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 32, .agc1_pt3 = 114, .agc1_slope1 = 143, .agc1_slope2 = 144, .agc2_pt1 = 114, .agc2_pt2 = 227, .agc2_slope1 = 116, .agc2_slope2 = 117, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, }, { .band_caps = BAND_CBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 787, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 0, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 32, .agc1_pt3 = 114, .agc1_slope1 = 143, .agc1_slope2 = 144, .agc2_pt1 = 114, .agc2_pt2 = 227, .agc2_slope1 = 116, .agc2_slope2 = 117, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, } }; static struct dibx000_bandwidth_config dib8090_pll_config_12mhz = { .internal = 54000, .sampling = 13500, .pll_prediv = 1, .pll_ratio = 18, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (599 << 0), .ifreq = (0 << 25) | 0, .timf = 20199727, .xtal_hz = 12000000, }; static int dib8090_get_adc_power(struct 
dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib8000_ops.get_adc_power(fe, 1); } static void dib8090_agc_control(struct dvb_frontend *fe, u8 restart) { deb_info("AGC control callback: %i\n", restart); dib0090_dcc_freq(fe, restart); if (restart == 0) /* before AGC startup */ dib0090_set_dc_servo(fe, 1); } static struct dib8000_config dib809x_dib8000_config[2] = { { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib8090_agc_config, .agc_control = dib8090_agc_control, .pll = &dib8090_pll_config_12mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .div_cfg = 0x31, .output_mode = OUTMODE_MPEG2_FIFO, .drives = 0x2d98, .diversity_delay = 48, .refclksel = 3, }, { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib8090_agc_config, .agc_control = dib8090_agc_control, .pll = &dib8090_pll_config_12mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .div_cfg = 0x31, .output_mode = OUTMODE_DIVERSITY, .drives = 0x2d08, .diversity_delay = 1, .refclksel = 3, } }; static struct dib0090_wbd_slope dib8090_wbd_table[] = { /* max freq ; cold slope ; cold offset ; warm slope ; warm offset ; wbd gain */ { 120, 0, 500, 0, 500, 4 }, /* CBAND */ { 170, 0, 450, 0, 450, 4 }, /* CBAND */ { 380, 48, 373, 28, 259, 6 }, /* VHF */ { 860, 34, 700, 36, 616, 6 }, /* high UHF */ { 0xFFFF, 34, 700, 36, 616, 6 }, /* default */ }; static struct dib0090_config dib809x_dib0090_config = { .io.pll_bypass = 1, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 20, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 12000, .reset = dib80xx_tuner_reset, .sleep = dib80xx_tuner_sleep, .clkouttobamse = 1, .analog_output = 1, .i2c_address = DEFAULT_DIB0090_I2C_ADDRESS, .use_pwm_agc = 1, .clkoutdrive = 1, .get_adc_power = dib8090_get_adc_power, .freq_offset_khz_uhf = -63, .freq_offset_khz_vhf = -143, .wbd = dib8090_wbd_table, .fref_clock_ratio = 6, }; static u8 dib8090_compute_pll_parameters(struct dvb_frontend *fe) { u8 optimal_pll_ratio = 20; u32 freq_adc, ratio, rest, max = 0; u8 pll_ratio; for (pll_ratio = 17; pll_ratio <= 20; pll_ratio++) { freq_adc = 12 * pll_ratio * (1 << 8) / 16; ratio = ((fe->dtv_property_cache.frequency / 1000) * (1 << 8) / 1000) / freq_adc; rest = ((fe->dtv_property_cache.frequency / 1000) * (1 << 8) / 1000) - ratio * freq_adc; if (rest > freq_adc / 2) rest = freq_adc - rest; deb_info("PLL ratio=%i rest=%i\n", pll_ratio, rest); if ((rest > max) && (rest > 717)) { optimal_pll_ratio = pll_ratio; max = rest; } } deb_info("optimal PLL ratio=%i\n", optimal_pll_ratio); return optimal_pll_ratio; } static int dib8096_set_param_override(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u8 pll_ratio, band = BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000); u16 target, ltgain, rf_gain_limit; u32 timf; int ret = 0; enum frontend_tune_state tune_state = CT_SHUTDOWN; switch (band) { default: deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency); fallthrough; case BAND_VHF: state->dib8000_ops.set_gpio(fe, 3, 0, 1); break; case BAND_UHF: state->dib8000_ops.set_gpio(fe, 3, 0, 0); 
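/* Note: the band switch above drives demod GPIO 3 (set for VHF, cleared for UHF); it presumably selects the VHF/UHF RF input path on these designs. */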
break; } ret = state->set_param_save(fe); if (ret < 0) return ret; if (fe->dtv_property_cache.bandwidth_hz != 6000000) { deb_info("only 6MHz bandwidth is supported\n"); return -EINVAL; } /* Update the PLL ratio if needed */ state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0); /* Get the optimized PLL ratio to remove spurs */ pll_ratio = dib8090_compute_pll_parameters(fe); if (pll_ratio == 17) timf = 21387946; else if (pll_ratio == 18) timf = 20199727; else if (pll_ratio == 19) timf = 19136583; else timf = 18179756; /* Update ratio */ state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio); state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf); if (band != BAND_CBAND) { /* dib0090_get_wbd_target returns the temperature-compensated wbd target */ target = (dib0090_get_wbd_target(fe) * 8 * 18 / 33 + 1) / 2; state->dib8000_ops.set_wbd_ref(fe, target); } if (band == BAND_CBAND) { deb_info("tuning in CBAND - soft-AGC startup\n"); dib0090_set_tune_state(fe, CT_AGC_START); do { ret = dib0090_gain_control(fe); msleep(ret); tune_state = dib0090_get_tune_state(fe); if (tune_state == CT_AGC_STEP_0) state->dib8000_ops.set_gpio(fe, 6, 0, 1); else if (tune_state == CT_AGC_STEP_1) { dib0090_get_current_gain(fe, NULL, NULL, &rf_gain_limit, &ltgain); if (rf_gain_limit < 2000) /* activate the external attenuator in case of very high input power */ state->dib8000_ops.set_gpio(fe, 6, 0, 0); } } while (tune_state < CT_AGC_STOP); deb_info("switching to PWM AGC\n"); dib0090_pwm_gain_reset(fe); state->dib8000_ops.pwm_agc_reset(fe); state->dib8000_ops.set_tune_state(fe, CT_DEMOD_START); } else { /* for everything other than CBAND we use the standard AGC */ deb_info("not tuning in CBAND - standard AGC startup\n"); dib0090_pwm_gain_reset(fe); } return 0; } static int dib809x_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); /* FIXME: if adap->id != 0, check if it is fe_adap[1] */ if (!dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config)) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override; return 0; } static int stk809x_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]); return adap->fe_adap[0].fe == NULL ?
-ENODEV : 0; } static int stk809x_frontend1_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x82, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c; struct dvb_frontend *fe_slave = st->dib8000_ops.get_slave_frontend(adap->fe_adap[0].fe, 1); if (fe_slave) { tun_i2c = st->dib8000_ops.get_i2c_master(fe_slave, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0090_register, fe_slave, tun_i2c, &dib809x_dib0090_config) == NULL) return -ENODEV; fe_slave->dvb = adap->fe_adap[0].fe->dvb; fe_slave->ops.tuner_ops.set_params = dib8096_set_param_override; } tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override; return 0; } static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_frontend *fe_slave; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(20); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(1000); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 2, 18, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]); if (adap->fe_adap[0].fe == NULL) return -ENODEV; /* Needed to increment refcount */ if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; fe_slave = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]); state->dib8000_ops.set_slave_frontend(adap->fe_adap[0].fe, fe_slave); return fe_slave == NULL ? 
-ENODEV : 0; } /* TFE8096P */ static struct dibx000_agc_config dib8096p_agc_config[2] = { { .band_caps = BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 684, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 32767, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 105, .agc1_slope1 = 0, .agc1_slope2 = 156, .agc2_pt1 = 105, .agc2_pt2 = 255, .agc2_slope1 = 54, .agc2_slope2 = 0, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, } , { .band_caps = BAND_FM | BAND_VHF | BAND_CBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 732, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 32767, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 98, .agc1_slope1 = 0, .agc1_slope2 = 167, .agc2_pt1 = 98, .agc2_pt2 = 255, .agc2_slope1 = 52, .agc2_slope2 = 0, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, } }; static struct dibx000_bandwidth_config dib8096p_clock_config_12_mhz = { .internal = 108000, .sampling = 13500, .pll_prediv = 1, .pll_ratio = 9, .pll_range = 1, .pll_reset = 0, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 0, .ADClkSrc = 0, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20199729, .xtal_hz = 12000000, }; static struct dib8000_config tfe8096p_dib8000_config = { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .update_lna = NULL, .agc_config_count = 2, .agc = dib8096p_agc_config, .pll = &dib8096p_clock_config_12_mhz, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .agc_control = NULL, .diversity_delay = 48, .output_mode = OUTMODE_MPEG2_FIFO, .enMpegOutput = 1, }; static struct dib0090_wbd_slope dib8096p_wbd_table[] = { { 380, 81, 850, 64, 540, 4}, { 860, 51, 866, 21, 375, 4}, {1700, 0, 250, 0, 100, 6}, {2600, 0, 250, 0, 100, 6}, { 0xFFFF, 0, 0, 0, 0, 0}, }; static struct dib0090_config tfe8096p_dib0090_config = { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = -143, .freq_offset_khz_vhf = -143, .get_adc_power = dib8090_get_adc_power, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 1, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, .force_cband_input = 0, }; struct dibx090p_best_adc { u32 timf; u32 pll_loopdiv; u32 pll_prediv; }; static int dib8096p_get_best_sampling(struct dvb_frontend *fe, struct dibx090p_best_adc *adc) { u8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1; u16 xtal = 12000; 
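/* xtal is the 12 MHz reference expressed in kHz. The loops below walk the candidate (prediv, loopdiv) PLL settings, keep only those whose phase-comparator, SDRAM and demod clocks stay within the limits declared just below, and reject any candidate whose sampling-clock harmonic would fall inside the tuned channel (the spur test). */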
u16 fcp_min = 1900; /* PLL, Minimum Frequency of phase comparator (KHz) */ u16 fcp_max = 20000; /* PLL, Maximum Frequency of phase comparator (KHz) */ u32 fmem_max = 140000; /* 140MHz max SDRAM freq */ u32 fdem_min = 66000; u32 fcp = 0, fs = 0, fdem = 0, fmem = 0; u32 harmonic_id = 0; adc->timf = 0; adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; deb_info("bandwidth = %d", fe->dtv_property_cache.bandwidth_hz); /* Find Min and Max prediv */ while ((xtal / max_prediv) >= fcp_min) max_prediv++; max_prediv--; min_prediv = max_prediv; while ((xtal / min_prediv) <= fcp_max) { min_prediv--; if (min_prediv == 1) break; } deb_info("MIN prediv = %d : MAX prediv = %d", min_prediv, max_prediv); min_prediv = 1; for (prediv = min_prediv; prediv < max_prediv; prediv++) { fcp = xtal / prediv; if (fcp > fcp_min && fcp < fcp_max) { for (loopdiv = 1; loopdiv < 64; loopdiv++) { fmem = ((xtal/prediv) * loopdiv); fdem = fmem / 2; fs = fdem / 4; /* test min/max system restrictions */ if ((fdem >= fdem_min) && (fmem <= fmem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz / 1000)) { spur = 0; /* test fs harmonics positions */ for (harmonic_id = (fe->dtv_property_cache.frequency / (1000 * fs)); harmonic_id <= ((fe->dtv_property_cache.frequency / (1000 * fs)) + 1); harmonic_id++) { if (((fs * harmonic_id) >= (fe->dtv_property_cache.frequency / 1000 - (fe->dtv_property_cache.bandwidth_hz / 2000))) && ((fs * harmonic_id) <= (fe->dtv_property_cache.frequency / 1000 + (fe->dtv_property_cache.bandwidth_hz / 2000)))) { spur = 1; break; } } if (!spur) { adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; adc->timf = (4260880253U / fdem) * (1 << 8); adc->timf += ((4260880253U % fdem) << 8) / fdem; deb_info("RF %6d; BW %6d; Xtal %6d; Fmem %6d; Fdem %6d; Fs %6d; Prediv %2d; Loopdiv %2d; Timf %8d;", fe->dtv_property_cache.frequency, fe->dtv_property_cache.bandwidth_hz, xtal, fmem, fdem, fs, prediv, loopdiv, adc->timf); break; } } } } if (!spur) break; } if (adc->pll_loopdiv == 0 && adc->pll_prediv == 0) return -EINVAL; return 0; } static int dib8096p_agc_startup(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; struct dibx000_bandwidth_config pll; struct dibx090p_best_adc adc; u16 target; int ret; ret = state->set_param_save(fe); if (ret < 0) return ret; memset(&pll, 0, sizeof(struct dibx000_bandwidth_config)); dib0090_pwm_gain_reset(fe); /* dib0090_get_wbd_target is returning any possible temperature compensated wbd-target */ target = (dib0090_get_wbd_target(fe) * 8 + 1) / 2; state->dib8000_ops.set_wbd_ref(fe, target); if (dib8096p_get_best_sampling(fe, &adc) == 0) { pll.pll_ratio = adc.pll_loopdiv; pll.pll_prediv = adc.pll_prediv; dib0700_set_i2c_speed(adap->dev, 200); state->dib8000_ops.update_pll(fe, &pll, fe->dtv_property_cache.bandwidth_hz / 1000, 0); state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, adc.timf); dib0700_set_i2c_speed(adap->dev, 1000); } return 0; } static int tfe8096p_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; u32 fw_version; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL); if (fw_version >= 0x10200) st->fw_use_new_i2c_api = 1; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); 
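/* Same bring-up ordering as the other *_frontend_attach helpers in this file: GPIO10 is pulsed low, dib0700_ctrl_clock(adap->dev, 72, 1) is issued, GPIO10 is raised again and GPIO0 is set last before the demod is enumerated. */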
dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x80, 1); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &tfe8096p_dib8000_config); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int tfe8096p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe8096p_dib0090_config.reset = st->dib8000_ops.tuner_sleep; tfe8096p_dib0090_config.sleep = st->dib8000_ops.tuner_sleep; tfe8096p_dib0090_config.wbd = dib8096p_wbd_table; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe8096p_dib0090_config) == NULL) return -ENODEV; st->dib8000_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096p_agc_startup; return 0; } /* STK9090M */ static int dib90x0_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { return dib9000_fw_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int dib90x0_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { return dib9000_fw_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } static int dib90x0_tuner_reset(struct dvb_frontend *fe, int onoff) { return dib9000_set_gpio(fe, 5, 0, !onoff); } static int dib90x0_tuner_sleep(struct dvb_frontend *fe, int onoff) { return dib9000_set_gpio(fe, 0, 0, onoff); } static int dib01x0_pmu_update(struct i2c_adapter *i2c, u16 *data, u8 len) { u8 wb[4] = { 0xc >> 8, 0xc & 0xff, 0, 0 }; u8 rb[2]; struct i2c_msg msg[2] = { {.addr = 0x1e >> 1, .flags = 0, .buf = wb, .len = 2}, {.addr = 0x1e >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2}, }; u8 index_data; dibx000_i2c_set_speed(i2c, 250); if (i2c_transfer(i2c, msg, 2) != 2) return -EIO; switch (rb[0] << 8 | rb[1]) { case 0: deb_info("Found DiB0170 rev1: This version of DiB0170 is not supported any longer.\n"); return -EIO; case 1: deb_info("Found DiB0170 rev2"); break; case 2: deb_info("Found DiB0190 rev2"); break; default: deb_info("DiB01x0 not found"); return -EIO; } for (index_data = 0; index_data < len; index_data += 2) { wb[2] = (data[index_data + 1] >> 8) & 0xff; wb[3] = (data[index_data + 1]) & 0xff; if (data[index_data] == 0) { wb[0] = (data[index_data] >> 8) & 0xff; wb[1] = (data[index_data]) & 0xff; msg[0].len = 2; if (i2c_transfer(i2c, msg, 2) != 2) return -EIO; wb[2] |= rb[0]; wb[3] |= rb[1] & ~(3 << 4); } wb[0] = (data[index_data] >> 8)&0xff; wb[1] = (data[index_data])&0xff; msg[0].len = 4; if (i2c_transfer(i2c, &msg[0], 1) != 1) return -EIO; } return 0; } static struct dib9000_config stk9090m_config = { .output_mpeg2_in_188_bytes = 1, .output_mode = OUTMODE_MPEG2_FIFO, .vcxo_timer = 279620, .timing_frequency = 20452225, .demod_clock_khz = 60000, .xtal_clock_khz = 30000, .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0), .subband = { 2, { { 240, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0008, 0x0000, 0x0008 } }, /* GPIO 3 to 1 for VHF */ { 890, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0008, 0x0000, 0x0000 } }, /* GPIO 3 to 0 for UHF */ { 0 }, }, }, .gpio_function = { { .component = BOARD_GPIO_COMPONENT_DEMOD, 
.function = BOARD_GPIO_FUNCTION_COMPONENT_ON, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = (0x10 & ~0x1) | 0x20 }, { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_OFF, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = 0 | 0x21 }, }, }; static struct dib9000_config nim9090md_config[2] = { { .output_mpeg2_in_188_bytes = 1, .output_mode = OUTMODE_MPEG2_FIFO, .vcxo_timer = 279620, .timing_frequency = 20452225, .demod_clock_khz = 60000, .xtal_clock_khz = 30000, .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0), }, { .output_mpeg2_in_188_bytes = 1, .output_mode = OUTMODE_DIVERSITY, .vcxo_timer = 279620, .timing_frequency = 20452225, .demod_clock_khz = 60000, .xtal_clock_khz = 30000, .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0), .subband = { 2, { { 240, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0006, 0x0000, 0x0006 } }, /* GPIO 1 and 2 to 1 for VHF */ { 890, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0006, 0x0000, 0x0000 } }, /* GPIO 1 and 2 to 0 for UHF */ { 0 }, }, }, .gpio_function = { { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_ON, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = (0x10 & ~0x1) | 0x20 }, { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_OFF, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = 0 | 0x21 }, }, } }; static struct dib0090_config dib9090_dib0090_config = { .io.pll_bypass = 0, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 8, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 30000, .reset = dib90x0_tuner_reset, .sleep = dib90x0_tuner_sleep, .clkouttobamse = 0, .analog_output = 0, .use_pwm_agc = 0, .clkoutdrive = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, }; static struct dib0090_config nim9090md_dib0090_config[2] = { { .io.pll_bypass = 0, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 8, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 30000, .reset = dib90x0_tuner_reset, .sleep = dib90x0_tuner_sleep, .clkouttobamse = 1, .analog_output = 0, .use_pwm_agc = 0, .clkoutdrive = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, }, { .io.pll_bypass = 0, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 8, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 30000, .reset = dib90x0_tuner_reset, .sleep = dib90x0_tuner_sleep, .clkouttobamse = 0, .analog_output = 0, .use_pwm_agc = 0, .clkoutdrive = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, } }; static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct dib0700_state *st = adap->dev->priv; u32 fw_version; /* Make use of the new i2c functions from FW 1.20 */ dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL); if (fw_version >= 0x10200) st->fw_use_new_i2c_api = 1; dib0700_set_i2c_speed(adap->dev, 340); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, 
GPIO_OUT, 1); dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x80); if (request_firmware(&state->frontend_firmware, "dib9090.fw", &adap->dev->udev->dev)) { deb_info("%s: Upload failed. (file not found?)\n", __func__); return -ENODEV; } else { deb_info("%s: firmware read %zu bytes.\n", __func__, state->frontend_firmware->size); } stk9090m_config.microcode_B_fe_size = state->frontend_firmware->size; stk9090m_config.microcode_B_fe_buffer = state->frontend_firmware->data; adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config); if (!adap->fe_adap[0].fe) { release_firmware(state->frontend_firmware); return -ENODEV; } return 0; } static int dib9090_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct i2c_adapter *i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); u16 data_dib190[10] = { 1, 0x1374, 2, 0x01a2, 7, 0x0020, 0, 0x00ef, 8, 0x0486, }; if (!IS_ENABLED(CONFIG_DVB_DIB9000)) return -ENODEV; if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); if (!i2c) return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0) return -ENODEV; release_firmware(state->frontend_firmware); return 0; } static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct dib0700_state *st = adap->dev->priv; struct i2c_adapter *i2c; struct dvb_frontend *fe_slave; u32 fw_version; /* Make use of the new i2c functions from FW 1.20 */ dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL); if (fw_version >= 0x10200) st->fw_use_new_i2c_api = 1; dib0700_set_i2c_speed(adap->dev, 340); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (request_firmware(&state->frontend_firmware, "dib9090.fw", &adap->dev->udev->dev)) { deb_info("%s: Upload failed. 
(file not found?)\n", __func__); return -EIO; } else { deb_info("%s: firmware read %zu bytes.\n", __func__, state->frontend_firmware->size); } nim9090md_config[0].microcode_B_fe_size = state->frontend_firmware->size; nim9090md_config[0].microcode_B_fe_buffer = state->frontend_firmware->data; nim9090md_config[1].microcode_B_fe_size = state->frontend_firmware->size; nim9090md_config[1].microcode_B_fe_buffer = state->frontend_firmware->data; dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80); adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]); if (!adap->fe_adap[0].fe) { release_firmware(state->frontend_firmware); return -ENODEV; } i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0); dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82); fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]); dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave); if (!fe_slave) { release_firmware(state->frontend_firmware); return -ENODEV; } return 0; } static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct i2c_adapter *i2c; struct dvb_frontend *fe_slave; u16 data_dib190[10] = { 1, 0x5374, 2, 0x01ae, 7, 0x0020, 0, 0x00ef, 8, 0x0406, }; if (!IS_ENABLED(CONFIG_DVB_DIB9000)) return -ENODEV; i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); if (!i2c) return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0) return -ENODEV; fe_slave = dib9000_get_slave_frontend(adap->fe_adap[0].fe, 1); if (fe_slave != NULL) { i2c = dib9000_get_component_bus_interface(adap->fe_adap[0].fe); dib9000_set_i2c_adapter(fe_slave, i2c); i2c = dib9000_get_tuner_interface(fe_slave); if (dvb_attach(dib0090_fw_register, fe_slave, i2c, &nim9090md_dib0090_config[1]) == NULL) return -ENODEV; fe_slave->dvb = adap->fe_adap[0].fe->dvb; dib9000_fw_set_component_bus_speed(adap->fe_adap[0].fe, 1500); if (dib9000_firmware_post_pll_init(fe_slave) < 0) return -ENODEV; } release_firmware(state->frontend_firmware); return 0; } /* NIM7090 */ static int dib7090p_get_best_sampling(struct dvb_frontend *fe , struct dibx090p_best_adc *adc) { u8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1; u16 xtal = 12000; u32 fcp_min = 1900; /* PLL Minimum Frequency comparator KHz */ u32 fcp_max = 20000; /* PLL Maximum Frequency comparator KHz */ u32 fdem_max = 76000; u32 fdem_min = 69500; u32 fcp = 0, fs = 0, fdem = 0; u32 harmonic_id = 0; adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; adc->timf = 0; deb_info("bandwidth = %d fdem_min =%d", fe->dtv_property_cache.bandwidth_hz, fdem_min); /* Find Min and Max prediv */ while ((xtal/max_prediv) >= fcp_min) max_prediv++; max_prediv--; min_prediv = max_prediv; while ((xtal/min_prediv) <= fcp_max) { min_prediv--; if (min_prediv == 1) break; } deb_info("MIN prediv = %d : MAX prediv = %d", min_prediv, max_prediv); min_prediv = 2; for (prediv = min_prediv ; prediv < max_prediv; prediv++) { fcp = xtal / prediv; if (fcp > fcp_min && fcp < fcp_max) { for (loopdiv = 1 ; loopdiv < 64 ; loopdiv++) { fdem = ((xtal/prediv) * loopdiv); fs = fdem / 4; /* test min/max system restrictions */ if ((fdem >= fdem_min) && (fdem <= 
fdem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz/1000)) { spur = 0; /* test fs harmonics positions */ for (harmonic_id = (fe->dtv_property_cache.frequency / (1000*fs)) ; harmonic_id <= ((fe->dtv_property_cache.frequency / (1000*fs))+1) ; harmonic_id++) { if (((fs*harmonic_id) >= ((fe->dtv_property_cache.frequency/1000) - (fe->dtv_property_cache.bandwidth_hz/2000))) && ((fs*harmonic_id) <= ((fe->dtv_property_cache.frequency/1000) + (fe->dtv_property_cache.bandwidth_hz/2000)))) { spur = 1; break; } } if (!spur) { adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; adc->timf = 2396745143UL/fdem*(1 << 9); adc->timf += ((2396745143UL%fdem) << 9)/fdem; deb_info("loopdiv=%i prediv=%i timf=%i", loopdiv, prediv, adc->timf); break; } } } } if (!spur) break; } if (adc->pll_loopdiv == 0 && adc->pll_prediv == 0) return -EINVAL; else return 0; } static int dib7090_agc_startup(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; struct dibx000_bandwidth_config pll; u16 target; struct dibx090p_best_adc adc; int ret; ret = state->set_param_save(fe); if (ret < 0) return ret; memset(&pll, 0, sizeof(struct dibx000_bandwidth_config)); dib0090_pwm_gain_reset(fe); target = (dib0090_get_wbd_target(fe) * 8 + 1) / 2; state->dib7000p_ops.set_wbd_ref(fe, target); if (dib7090p_get_best_sampling(fe, &adc) == 0) { pll.pll_ratio = adc.pll_loopdiv; pll.pll_prediv = adc.pll_prediv; state->dib7000p_ops.update_pll(fe, &pll); state->dib7000p_ops.ctrl_timf(fe, DEMOD_TIMF_SET, adc.timf); } return 0; } static int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart) { deb_info("AGC restart callback: %d", restart); if (restart == 0) /* before AGC startup */ dib0090_set_dc_servo(fe, 1); return 0; } static int tfe7790p_update_lna(struct dvb_frontend *fe, u16 agc_global) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("update LNA: agc global=%i", agc_global); if (agc_global < 25000) { state->dib7000p_ops.set_gpio(fe, 8, 0, 0); state->dib7000p_ops.set_agc1_min(fe, 0); } else { state->dib7000p_ops.set_gpio(fe, 8, 0, 1); state->dib7000p_ops.set_agc1_min(fe, 32768); } return 0; } static struct dib0090_wbd_slope dib7090_wbd_table[] = { { 380, 81, 850, 64, 540, 4}, { 860, 51, 866, 21, 375, 4}, {1700, 0, 250, 0, 100, 6}, {2600, 0, 250, 0, 100, 6}, { 0xFFFF, 0, 0, 0, 0, 0}, }; static struct dibx000_agc_config dib7090_agc_config[2] = { { .band_caps = BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 687, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 32768, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 32, .agc1_pt3 = 114, .agc1_slope1 = 143, .agc1_slope2 = 144, .agc2_pt1 = 114, .agc2_pt2 = 227, .agc2_slope1 = 116, .agc2_slope2 = 117, .alpha_mant = 18, .alpha_exp = 0, .beta_mant = 20, .beta_exp = 59, .perform_agc_softsplit = 0, } , { .band_caps = BAND_FM | BAND_VHF | BAND_CBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 
11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 732, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 98, .agc1_slope1 = 0, .agc1_slope2 = 167, .agc2_pt1 = 98, .agc2_pt2 = 255, .agc2_slope1 = 104, .agc2_slope2 = 0, .alpha_mant = 18, .alpha_exp = 0, .beta_mant = 20, .beta_exp = 59, .perform_agc_softsplit = 0, } }; static struct dibx000_bandwidth_config dib7090_clock_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 5, .pll_range = 0, .pll_reset = 0, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20452225, .xtal_hz = 15000000, }; static struct dib7000p_config nim7090_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7790p_update_lna, /* GPIO used is the same as TFE7790 */ .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_FIFO, .enMpegOutput = 1, }; static int tfe7090p_pvr_update_lna(struct dvb_frontend *fe, u16 agc_global) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("TFE7090P-PVR update LNA: agc global=%i", agc_global); if (agc_global < 25000) { state->dib7000p_ops.set_gpio(fe, 5, 0, 0); state->dib7000p_ops.set_agc1_min(fe, 0); } else { state->dib7000p_ops.set_gpio(fe, 5, 0, 1); state->dib7000p_ops.set_agc1_min(fe, 32768); } return 0; } static struct dib7000p_config tfe7090pvr_dib7000p_config[2] = { { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7090p_pvr_update_lna, .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .default_i2c_addr = 0x90, .enMpegOutput = 1, }, { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7090p_pvr_update_lna, .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .default_i2c_addr = 0x92, .enMpegOutput = 0, } }; static struct dib0090_config nim7090_dib0090_config = { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, .clkouttobamse = 
1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, }; static struct dib7000p_config tfe7790p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7790p_update_lna, .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .enMpegOutput = 1, }; static struct dib0090_config tfe7790p_dib0090_config = { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, .force_cband_input = 0, .is_dib7090e = 0, .force_crystal_mode = 1, }; static struct dib0090_config tfe7090pvr_dib0090_config[2] = { { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = 50, .freq_offset_khz_vhf = 70, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, }, { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = -50, .freq_offset_khz_vhf = -70, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, } }; static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int nim7090_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); nim7090_dib0090_config.reset = st->dib7000p_ops.tuner_sleep; nim7090_dib0090_config.sleep = st->dib7000p_ops.tuner_sleep; nim7090_dib0090_config.get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &nim7090_dib0090_config) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* The TFE7090 requires the dib0700 to not be in master mode */ st->disable_streaming_master_mode = 1; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } dib0700_set_i2c_speed(adap->dev, 340); adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]); if (adap->fe_adap[0].fe == NULL) return -ENODEV; state->dib7000p_ops.slave_reset(adap->fe_adap[0].fe); return 0; } static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *i2c; struct dib0700_adapter_state *state = adap->priv; if (adap->dev->adapter[0].fe_adap[0].fe == NULL) { err("the master dib7090 has to be initialized first"); return -ENODEV; /* the master device has not been initialized */ } if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(i2c, 0x92, &tfe7090pvr_dib7000p_config[1]); dib0700_set_i2c_speed(adap->dev, 200); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int tfe7090pvr_tuner0_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe7090pvr_dib0090_config[0].reset = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[0].sleep = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[0].get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7090pvr_dib0090_config[0]) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } static int tfe7090pvr_tuner1_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe7090pvr_dib0090_config[1].reset = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[1].sleep = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[1].get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7090pvr_dib0090_config[1]) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* The TFE7790P requires the dib0700 to not be in master mode */ st->disable_streaming_master_mode = 1; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(20); dib0700_ctrl_clock(adap->dev, 72, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &tfe7790p_dib7000p_config); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int tfe7790p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe7790p_dib0090_config.reset = st->dib7000p_ops.tuner_sleep; tfe7790p_dib0090_config.sleep = st->dib7000p_ops.tuner_sleep; tfe7790p_dib0090_config.get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7790p_dib0090_config) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } /* STK7070PD */ static struct dib7000p_config stk7070pd_dib7000p_config[2] = { { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, }, { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, } }; static void stk7070pd_init(struct dvb_usb_device *dev) { dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(dev, 72, 1); msleep(10); dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1); } static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; stk7070pd_init(adap->dev); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 2, 18, stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &stk7070pd_dib7000p_config[0]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x82, &stk7070pd_dib7000p_config[1]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int novatd_read_status_override(struct dvb_frontend *fe, enum fe_status *stat) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dvb_usb_device *dev = adap->dev; struct dib0700_state *state = dev->priv; int ret; ret = state->read_status(fe, stat); if (!ret) dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, !!(*stat & FE_HAS_LOCK)); return ret; } static int novatd_sleep_override(struct dvb_frontend* fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dvb_usb_device *dev = adap->dev; struct dib0700_state *state = dev->priv; /* turn off LED */ dib0700_set_gpio(dev, adap->id == 0 ? 
GPIO1 : GPIO0, GPIO_OUT, 0); return state->sleep(fe); } /* * novatd_frontend_attach - Nova-TD specific attach * * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for * information purposes. */ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *dev = adap->dev; struct dib0700_state *st = dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (adap->id == 0) { stk7070pd_init(dev); /* turn the power LED on, the other two off (just in case) */ dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0); dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0); dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&dev->i2c_adap, 2, 18, stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } adap->fe_adap[0].fe = state->dib7000p_ops.init(&dev->i2c_adap, adap->id == 0 ? 0x80 : 0x82, &stk7070pd_dib7000p_config[adap->id]); if (adap->fe_adap[0].fe == NULL) return -ENODEV; st->read_status = adap->fe_adap[0].fe->ops.read_status; adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override; st->sleep = adap->fe_adap[0].fe->ops.sleep; adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override; return 0; } /* S5H1411 */ static struct s5h1411_config pinnacle_801e_config = { .output_mode = S5H1411_PARALLEL_OUTPUT, .gpio = S5H1411_GPIO_OFF, .mpeg_timing = S5H1411_MPEGTIMING_NONCONTINUOUS_NONINVERTING_CLOCK, .qam_if = S5H1411_IF_44000, .vsb_if = S5H1411_IF_44000, .inversion = S5H1411_INVERSION_OFF, .status_mode = S5H1411_DEMODLOCKING }; /* Pinnacle PCTV HD Pro 801e GPIOs map: GPIO0 - currently unknown GPIO1 - xc5000 tuner reset GPIO2 - CX25843 sleep GPIO3 - currently unknown GPIO4 - currently unknown GPIO6 - currently unknown GPIO7 - currently unknown GPIO9 - currently unknown GPIO10 - CX25843 reset */ static int s5h1411_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; /* Make use of the new i2c functions from FW 1.20 */ st->fw_use_new_i2c_api = 1; /* The s5h1411 requires the dib0700 to not be in master mode */ st->disable_streaming_master_mode = 1; /* All msleep values taken from Windows USB trace */ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 0); dib0700_set_gpio(adap->dev, GPIO3, GPIO_OUT, 0); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(400); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(60); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 0); msleep(30); /* Put the CX25843 to sleep for now since we're in digital mode */ dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1); /* GPIOs are initialized, do the attach */ adap->fe_adap[0].fe = dvb_attach(s5h1411_attach, &pinnacle_801e_config, &adap->dev->i2c_adap); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int dib0700_xc5000_tuner_callback(void *priv, int component, int command, int arg) { struct dvb_usb_adapter *adap = priv; if (command == XC5000_TUNER_RESET) { /* Reset the tuner */ dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 1); msleep(10); } else { err("xc5000: unknown tuner callback command: %d\n", command); return -EINVAL; } return 0; } static struct xc5000_config s5h1411_xc5000_tunerconfig = { .i2c_address = 0x64, .if_khz = 5380, }; static int xc5000_tuner_attach(struct dvb_usb_adapter *adap) { /* FIXME: generalize & move to common area */ adap->fe_adap[0].fe->callback = dib0700_xc5000_tuner_callback; return dvb_attach(xc5000_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &s5h1411_xc5000_tunerconfig) == NULL ? -ENODEV : 0; } static int dib0700_xc4000_tuner_callback(void *priv, int component, int command, int arg) { struct dvb_usb_adapter *adap = priv; struct dib0700_adapter_state *state = adap->priv; if (command == XC4000_TUNER_RESET) { /* Reset the tuner */ state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 0); msleep(10); state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); } else { err("xc4000: unknown tuner callback command: %d\n", command); return -EINVAL; } return 0; } static struct dibx000_agc_config stk7700p_7000p_xc4000_agc_config = { .band_caps = BAND_UHF | BAND_VHF, .setup = 0x64, .inv_gain = 0x02c8, .time_stabiliz = 0x15, .alpha_level = 0x00, .thlock = 0x76, .wbd_inv = 0x01, .wbd_ref = 0x0b33, .wbd_sel = 0x00, .wbd_alpha = 0x02, .agc1_max = 0x00, .agc1_min = 0x00, .agc2_max = 0x9b26, .agc2_min = 0x26ca, .agc1_pt1 = 0x00, .agc1_pt2 = 0x00, .agc1_pt3 = 0x00, .agc1_slope1 = 0x00, .agc1_slope2 = 0x00, .agc2_pt1 = 0x00, .agc2_pt2 = 0x80, .agc2_slope1 = 0x1d, .agc2_slope2 = 0x1d, .alpha_mant = 0x11, .alpha_exp = 0x1b, .beta_mant = 0x17, .beta_exp = 0x33, .perform_agc_softsplit = 0x00, }; static struct dibx000_bandwidth_config stk7700p_xc4000_pll_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 0, .sad_cfg = (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */ .ifreq = 39370534, .timf = 20452225, .xtal_hz = 30000000 }; /* FIXME: none of these inputs are validated yet */ static struct dib7000p_config pctv_340e_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &stk7700p_7000p_xc4000_agc_config, .bw = &stk7700p_xc4000_pll_config, .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS, }; /* PCTV 340e GPIOs map: dib0700: GPIO2 - CX25843 sleep GPIO3 - CS5340 reset GPIO5 - IRD GPIO6 - Power Supply GPIO8 - LNA (1=off 0=on) GPIO10 - CX25843 reset dib7000: GPIO8 - xc4000 reset */ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* Power Supply on */ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(50); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(100); /* Allow power supply to settle before probing */ /* cx25843 reset */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(1); /* cx25843 datasheet say 350us required */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); /* LNA off for now */ 
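	/* Per the PCTV 340e GPIO map above, dib0700 GPIO8 gates the LNA
	 * (1 = off, 0 = on), so driving it high here keeps the LNA bypassed
	 * while the dib7000p demodulator is detected and initialised.
	 */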
dib0700_set_gpio(adap->dev, GPIO8, GPIO_OUT, 1); /* Put the CX25843 to sleep for now since we're in digital mode */ dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1); /* FIXME: not verified yet */ dib0700_ctrl_clock(adap->dev, 72, 1); msleep(500); if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? */ dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x12, &pctv_340e_config); st->is_dib7000pc = 1; return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static struct xc4000_config dib7000p_xc4000_tunerconfig = { .i2c_address = 0x61, .default_pm = 1, .dvb_amplitude = 0, .set_smoothedcvbs = 0, .if_khz = 5400 }; static int xc4000_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; /* The xc4000 is not on the main i2c bus */ tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (tun_i2c == NULL) { printk(KERN_ERR "Could not reach tuner i2c bus\n"); return 0; } /* Setup the reset callback */ adap->fe_adap[0].fe->callback = dib0700_xc4000_tuner_callback; return dvb_attach(xc4000_attach, adap->fe_adap[0].fe, tun_i2c, &dib7000p_xc4000_tunerconfig) == NULL ? -ENODEV : 0; } static struct lgdt3305_config hcw_lgdt3305_config = { .i2c_addr = 0x0e, .mpeg_mode = LGDT3305_MPEG_PARALLEL, .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE, .tpvalid_polarity = LGDT3305_TP_VALID_LOW, .deny_i2c_rptr = 0, .spectral_inversion = 1, .qam_if_khz = 6000, .vsb_if_khz = 6000, .usref_8vsb = 0x0500, }; static struct mxl5007t_config hcw_mxl5007t_config = { .xtal_freq_hz = MxL_XTAL_25_MHZ, .if_freq_hz = MxL_IF_6_MHZ, .invert_if = 1, }; /* TIGER-ATSC map: GPIO0 - LNA_CTR (H: LNA power enabled, L: LNA power disabled) GPIO1 - ANT_SEL (H: VPA, L: MCX) GPIO4 - SCL2 GPIO6 - EN_TUNER GPIO7 - SDA2 GPIO10 - DEM_RST MXL is behind LG's i2c repeater. LG is on SCL2/SDA2 gpios on the DIB */ static int lgdt3305_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; /* Make use of the new i2c functions from FW 1.20 */ st->fw_use_new_i2c_api = 1; st->disable_streaming_master_mode = 1; /* fe power enable */ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(30); /* demod reset */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); adap->fe_adap[0].fe = dvb_attach(lgdt3305_attach, &hcw_lgdt3305_config, &adap->dev->i2c_adap); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int mxl5007t_tuner_attach(struct dvb_usb_adapter *adap) { return dvb_attach(mxl5007t_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, 0x60, &hcw_mxl5007t_config) == NULL ? 
-ENODEV : 0; } static int xbox_one_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct i2c_client *client_demod, *client_tuner; struct dvb_usb_device *d = adap->dev; struct mn88472_config mn88472_config = { }; struct tda18250_config tda18250_config; struct i2c_board_info info; st->fw_use_new_i2c_api = 1; st->disable_streaming_master_mode = 1; /* fe power enable */ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(30); /* demod reset */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); /* attach demod */ mn88472_config.fe = &adap->fe_adap[0].fe; mn88472_config.i2c_wr_max = 22; mn88472_config.xtal = 20500000; mn88472_config.ts_mode = PARALLEL_TS_MODE; mn88472_config.ts_clock = FIXED_TS_CLOCK; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "mn88472", I2C_NAME_SIZE); info.addr = 0x18; info.platform_data = &mn88472_config; request_module(info.type); client_demod = i2c_new_client_device(&d->i2c_adap, &info); if (!i2c_client_has_driver(client_demod)) goto fail_demod_device; if (!try_module_get(client_demod->dev.driver->owner)) goto fail_demod_module; st->i2c_client_demod = client_demod; adap->fe_adap[0].fe = mn88472_config.get_dvb_frontend(client_demod); /* attach tuner */ memset(&tda18250_config, 0, sizeof(tda18250_config)); tda18250_config.if_dvbt_6 = 3950; tda18250_config.if_dvbt_7 = 4450; tda18250_config.if_dvbt_8 = 4950; tda18250_config.if_dvbc_6 = 4950; tda18250_config.if_dvbc_8 = 4950; tda18250_config.if_atsc = 4079; tda18250_config.loopthrough = true; tda18250_config.xtal_freq = TDA18250_XTAL_FREQ_27MHZ; tda18250_config.fe = adap->fe_adap[0].fe; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "tda18250", I2C_NAME_SIZE); info.addr = 0x60; info.platform_data = &tda18250_config; request_module(info.type); client_tuner = i2c_new_client_device(&adap->dev->i2c_adap, &info); if (!i2c_client_has_driver(client_tuner)) goto fail_tuner_device; if (!try_module_get(client_tuner->dev.driver->owner)) goto fail_tuner_module; st->i2c_client_tuner = client_tuner; return 0; fail_tuner_module: i2c_unregister_device(client_tuner); fail_tuner_device: module_put(client_demod->dev.driver->owner); fail_demod_module: i2c_unregister_device(client_demod); fail_demod_device: return -ENODEV; } /* DVB-USB and USB stuff follows */ enum { DIBCOM_STK7700P, DIBCOM_STK7700P_PC, HAUPPAUGE_NOVA_T_500, HAUPPAUGE_NOVA_T_500_2, HAUPPAUGE_NOVA_T_STICK, AVERMEDIA_VOLAR, COMPRO_VIDEOMATE_U500, UNIWILL_STK7700P, LEADTEK_WINFAST_DTV_DONGLE_STK7700P, HAUPPAUGE_NOVA_T_STICK_2, AVERMEDIA_VOLAR_2, PINNACLE_PCTV2000E, TERRATEC_CINERGY_DT_XS_DIVERSITY, HAUPPAUGE_NOVA_TD_STICK, DIBCOM_STK7700D, DIBCOM_STK7070P, PINNACLE_PCTV_DVB_T_FLASH, DIBCOM_STK7070PD, PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T, COMPRO_VIDEOMATE_U500_PC, AVERMEDIA_EXPRESS, GIGABYTE_U7000, ULTIMA_ARTEC_T14BR, ASUS_U3000, ASUS_U3100, HAUPPAUGE_NOVA_T_STICK_3, HAUPPAUGE_MYTV_T, TERRATEC_CINERGY_HT_USB_XE, PINNACLE_EXPRESSCARD_320CX, PINNACLE_PCTV72E, PINNACLE_PCTV73E, YUAN_EC372S, TERRATEC_CINERGY_HT_EXPRESS, TERRATEC_CINERGY_T_XXS, LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2, HAUPPAUGE_NOVA_TD_STICK_52009, HAUPPAUGE_NOVA_T_500_3, GIGABYTE_U8000, YUAN_STK7700PH, ASUS_U3000H, PINNACLE_PCTV801E, PINNACLE_PCTV801E_SE, TERRATEC_CINERGY_T_EXPRESS, TERRATEC_CINERGY_DT_XS_DIVERSITY_2, SONY_PLAYTV, YUAN_PD378S, 
HAUPPAUGE_TIGER_ATSC, HAUPPAUGE_TIGER_ATSC_B210, YUAN_MC770, ELGATO_EYETV_DTT, ELGATO_EYETV_DTT_Dlx, LEADTEK_WINFAST_DTV_DONGLE_H, TERRATEC_T3, TERRATEC_T5, YUAN_STK7700D, YUAN_STK7700D_2, PINNACLE_PCTV73A, PCTV_PINNACLE_PCTV73ESE, PCTV_PINNACLE_PCTV282E, DIBCOM_STK7770P, TERRATEC_CINERGY_T_XXS_2, DIBCOM_STK807XPVR, DIBCOM_STK807XP, PIXELVIEW_SBTVD, EVOLUTEPC_TVWAY_PLUS, PINNACLE_PCTV73ESE, PINNACLE_PCTV282E, DIBCOM_STK8096GP, ELGATO_EYETV_DIVERSITY, DIBCOM_NIM9090M, DIBCOM_NIM8096MD, DIBCOM_NIM9090MD, DIBCOM_NIM7090, DIBCOM_TFE7090PVR, TECHNISAT_AIRSTAR_TELESTICK_2, MEDION_CREATIX_CTX1921, PINNACLE_PCTV340E, PINNACLE_PCTV340E_SE, DIBCOM_TFE7790P, DIBCOM_TFE8096P, ELGATO_EYETV_DTT_2, PCTV_2002E, PCTV_2002E_SE, PCTV_DIBCOM_STK8096PVR, DIBCOM_STK8096PVR, HAMA_DVBT_HYBRID, MICROSOFT_XBOX_ONE_TUNER, }; struct usb_device_id dib0700_usb_id_table[] = { DVB_USB_DEV(DIBCOM, DIBCOM_STK7700P), DVB_USB_DEV(DIBCOM, DIBCOM_STK7700P_PC), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_500), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_500_2), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_STICK), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_VOLAR), DVB_USB_DEV(COMPRO, COMPRO_VIDEOMATE_U500), DVB_USB_DEV(UNIWILL, UNIWILL_STK7700P), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_STK7700P), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_STICK_2), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_VOLAR_2), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV2000E), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_DT_XS_DIVERSITY), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_TD_STICK), DVB_USB_DEV(DIBCOM, DIBCOM_STK7700D), DVB_USB_DEV(DIBCOM, DIBCOM_STK7070P), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_DVB_T_FLASH), DVB_USB_DEV(DIBCOM, DIBCOM_STK7070PD), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T), DVB_USB_DEV(COMPRO, COMPRO_VIDEOMATE_U500_PC), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_EXPRESS), DVB_USB_DEV(GIGABYTE, GIGABYTE_U7000), DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_ARTEC_T14BR), DVB_USB_DEV(ASUS, ASUS_U3000), DVB_USB_DEV(ASUS, ASUS_U3100), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_STICK_3), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_MYTV_T), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_HT_USB_XE), DVB_USB_DEV(PINNACLE, PINNACLE_EXPRESSCARD_320CX), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV72E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV73E), DVB_USB_DEV(YUAN, YUAN_EC372S), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_HT_EXPRESS), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_XXS), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_TD_STICK_52009), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_500_3), DVB_USB_DEV(GIGABYTE, GIGABYTE_U8000), DVB_USB_DEV(YUAN, YUAN_STK7700PH), DVB_USB_DEV(ASUS, ASUS_U3000H), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV801E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV801E_SE), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_EXPRESS), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_DT_XS_DIVERSITY_2), DVB_USB_DEV(SONY, SONY_PLAYTV), DVB_USB_DEV(YUAN, YUAN_PD378S), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_TIGER_ATSC), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_TIGER_ATSC_B210), DVB_USB_DEV(YUAN, YUAN_MC770), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DTT), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DTT_Dlx), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_H), DVB_USB_DEV(TERRATEC, TERRATEC_T3), DVB_USB_DEV(TERRATEC, TERRATEC_T5), DVB_USB_DEV(YUAN, YUAN_STK7700D), DVB_USB_DEV(YUAN, YUAN_STK7700D_2), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV73A), DVB_USB_DEV(PCTV, PCTV_PINNACLE_PCTV73ESE), DVB_USB_DEV(PCTV, PCTV_PINNACLE_PCTV282E), DVB_USB_DEV(DIBCOM, DIBCOM_STK7770P), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_XXS_2), DVB_USB_DEV(DIBCOM, 
DIBCOM_STK807XPVR), DVB_USB_DEV(DIBCOM, DIBCOM_STK807XP), DVB_USB_DEV_VER(PIXELVIEW, PIXELVIEW_SBTVD, 0x000, 0x3f00), DVB_USB_DEV(EVOLUTEPC, EVOLUTEPC_TVWAY_PLUS), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV73ESE), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV282E), DVB_USB_DEV(DIBCOM, DIBCOM_STK8096GP), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DIVERSITY), DVB_USB_DEV(DIBCOM, DIBCOM_NIM9090M), DVB_USB_DEV(DIBCOM, DIBCOM_NIM8096MD), DVB_USB_DEV(DIBCOM, DIBCOM_NIM9090MD), DVB_USB_DEV(DIBCOM, DIBCOM_NIM7090), DVB_USB_DEV(DIBCOM, DIBCOM_TFE7090PVR), DVB_USB_DEV(TECHNISAT, TECHNISAT_AIRSTAR_TELESTICK_2), DVB_USB_DEV(MEDION, MEDION_CREATIX_CTX1921), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV340E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV340E_SE), DVB_USB_DEV(DIBCOM, DIBCOM_TFE7790P), DVB_USB_DEV(DIBCOM, DIBCOM_TFE8096P), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DTT_2), DVB_USB_DEV(PCTV, PCTV_2002E), DVB_USB_DEV(PCTV, PCTV_2002E_SE), DVB_USB_DEV(PCTV, PCTV_DIBCOM_STK8096PVR), DVB_USB_DEV(DIBCOM, DIBCOM_STK8096PVR), DVB_USB_DEV(HAMA, HAMA_DVBT_HYBRID), DVB_USB_DEV(MICROSOFT, MICROSOFT_XBOX_ONE_TUNER), { } }; MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); #define DIB0700_DEFAULT_DEVICE_PROPERTIES \ .caps = DVB_USB_IS_AN_I2C_ADAPTER, \ .usb_ctrl = DEVICE_SPECIFIC, \ .firmware = "dvb-usb-dib0700-1.20.fw", \ .download_firmware = dib0700_download_firmware, \ .no_reconnect = 1, \ .size_of_priv = sizeof(struct dib0700_state), \ .i2c_algo = &dib0700_i2c_algo, \ .identify_state = dib0700_identify_state #define DIB0700_DEFAULT_STREAMING_CONFIG(ep) \ .streaming_ctrl = dib0700_streaming_ctrl, \ .stream = { \ .type = USB_BULK, \ .count = 4, \ .endpoint = ep, \ .u = { \ .bulk = { \ .buffersize = 39480, \ } \ } \ } #define DIB0700_NUM_FRONTENDS(n) \ .num_frontends = n, \ .size_of_priv = sizeof(struct dib0700_adapter_state) struct dvb_usb_device_properties dib0700_devices[] = { { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk7700p_pid_filter, .pid_filter_ctrl = stk7700p_pid_filter_ctrl, .frontend_attach = stk7700p_frontend_attach, .tuner_attach = stk7700p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 8, .devices = { { "DiBcom STK7700P reference design", { &dib0700_usb_id_table[DIBCOM_STK7700P], &dib0700_usb_id_table[DIBCOM_STK7700P_PC] }, { NULL }, }, { "Hauppauge Nova-T Stick", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_STICK], &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_STICK_2], NULL }, { NULL }, }, { "AVerMedia AVerTV DVB-T Volar", { &dib0700_usb_id_table[AVERMEDIA_VOLAR], &dib0700_usb_id_table[AVERMEDIA_VOLAR_2] }, { NULL }, }, { "Compro Videomate U500", { &dib0700_usb_id_table[COMPRO_VIDEOMATE_U500], &dib0700_usb_id_table[COMPRO_VIDEOMATE_U500_PC] }, { NULL }, }, { "Uniwill STK7700P based (Hama and others)", { &dib0700_usb_id_table[UNIWILL_STK7700P], NULL }, { NULL }, }, { "Leadtek Winfast DTV Dongle (STK7700P based)", { &dib0700_usb_id_table[LEADTEK_WINFAST_DTV_DONGLE_STK7700P], &dib0700_usb_id_table[LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2] }, { NULL }, }, { "AVerMedia AVerTV DVB-T Express", { &dib0700_usb_id_table[AVERMEDIA_EXPRESS] }, { NULL }, }, { "Gigabyte U7000", { &dib0700_usb_id_table[GIGABYTE_U7000], NULL }, { NULL }, } }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, 
.change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = bristol_frontend_attach, .tuner_attach = bristol_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = bristol_frontend_attach, .tuner_attach = bristol_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 1, .devices = { { "Hauppauge Nova-T 500 Dual DVB-T", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_500], &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_500_2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700d_frontend_attach, .tuner_attach = stk7700d_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700d_frontend_attach, .tuner_attach = stk7700d_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 5, .devices = { { "Pinnacle PCTV 2000e", { &dib0700_usb_id_table[PINNACLE_PCTV2000E], NULL }, { NULL }, }, { "Terratec Cinergy DT XS Diversity", { &dib0700_usb_id_table[TERRATEC_CINERGY_DT_XS_DIVERSITY], NULL }, { NULL }, }, { "Hauppauge Nova-TD Stick/Elgato Eye-TV Diversity", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_TD_STICK], NULL }, { NULL }, }, { "DiBcom STK7700D reference design", { &dib0700_usb_id_table[DIBCOM_STK7700D], NULL }, { NULL }, }, { "YUAN High-Tech DiBcom STK7700D", { &dib0700_usb_id_table[YUAN_STK7700D_2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700P2_frontend_attach, .tuner_attach = stk7700d_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 3, .devices = { { "ASUS My Cinema U3000 Mini DVBT Tuner", { &dib0700_usb_id_table[ASUS_U3000], NULL }, { NULL }, }, { "Yuan EC372S", { &dib0700_usb_id_table[YUAN_EC372S], NULL }, { NULL }, }, { "Terratec Cinergy T Express", { &dib0700_usb_id_table[TERRATEC_CINERGY_T_EXPRESS], NULL }, { NULL }, } }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { 
DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070p_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 12, .devices = { { "DiBcom STK7070P reference design", { &dib0700_usb_id_table[DIBCOM_STK7070P], NULL }, { NULL }, }, { "Pinnacle PCTV DVB-T Flash Stick", { &dib0700_usb_id_table[PINNACLE_PCTV_DVB_T_FLASH], NULL }, { NULL }, }, { "Artec T14BR DVB-T", { &dib0700_usb_id_table[ULTIMA_ARTEC_T14BR], NULL }, { NULL }, }, { "ASUS My Cinema U3100 Mini DVBT Tuner", { &dib0700_usb_id_table[ASUS_U3100], NULL }, { NULL }, }, { "Hauppauge Nova-T Stick", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_STICK_3], NULL }, { NULL }, }, { "Hauppauge Nova-T MyTV.t", { &dib0700_usb_id_table[HAUPPAUGE_MYTV_T], NULL }, { NULL }, }, { "Pinnacle PCTV 72e", { &dib0700_usb_id_table[PINNACLE_PCTV72E], NULL }, { NULL }, }, { "Pinnacle PCTV 73e", { &dib0700_usb_id_table[PINNACLE_PCTV73E], NULL }, { NULL }, }, { "Elgato EyeTV DTT", { &dib0700_usb_id_table[ELGATO_EYETV_DTT], NULL }, { NULL }, }, { "Yuan PD378S", { &dib0700_usb_id_table[YUAN_PD378S], NULL }, { NULL }, }, { "Elgato EyeTV Dtt Dlx PD378S", { &dib0700_usb_id_table[ELGATO_EYETV_DTT_Dlx], NULL }, { NULL }, }, { "Elgato EyeTV DTT rev. 2", { &dib0700_usb_id_table[ELGATO_EYETV_DTT_2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070p_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 3, .devices = { { "Pinnacle PCTV 73A", { &dib0700_usb_id_table[PINNACLE_PCTV73A], NULL }, { NULL }, }, { "Pinnacle PCTV 73e SE", { &dib0700_usb_id_table[PCTV_PINNACLE_PCTV73ESE], &dib0700_usb_id_table[PINNACLE_PCTV73ESE], NULL }, { NULL }, }, { "Pinnacle PCTV 282e", { &dib0700_usb_id_table[PCTV_PINNACLE_PCTV282E], &dib0700_usb_id_table[PINNACLE_PCTV282E], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = novatd_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, 
.pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = novatd_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 3, .devices = { { "Hauppauge Nova-TD Stick (52009)", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_TD_STICK_52009], NULL }, { NULL }, }, { "PCTV 2002e", { &dib0700_usb_id_table[PCTV_2002E], NULL }, { NULL }, }, { "PCTV 2002e SE", { &dib0700_usb_id_table[PCTV_2002E_SE], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach0, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach1, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 5, .devices = { { "DiBcom STK7070PD reference design", { &dib0700_usb_id_table[DIBCOM_STK7070PD], NULL }, { NULL }, }, { "Pinnacle PCTV Dual DVB-T Diversity Stick", { &dib0700_usb_id_table[PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T], NULL }, { NULL }, }, { "Hauppauge Nova-TD-500 (84xxx)", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_500_3], NULL }, { NULL }, }, { "Terratec Cinergy DT USB XS Diversity/ T5", { &dib0700_usb_id_table[TERRATEC_CINERGY_DT_XS_DIVERSITY_2], &dib0700_usb_id_table[TERRATEC_T5], NULL}, { NULL }, }, { "Sony PlayTV", { &dib0700_usb_id_table[SONY_PLAYTV], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach0, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach1, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 1, .devices = { { "Elgato EyeTV Diversity", { &dib0700_usb_id_table[ELGATO_EYETV_DIVERSITY], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, 
.allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700ph_frontend_attach, .tuner_attach = stk7700ph_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 10, .devices = { { "Terratec Cinergy HT USB XE", { &dib0700_usb_id_table[TERRATEC_CINERGY_HT_USB_XE], NULL }, { NULL }, }, { "Pinnacle Expresscard 320cx", { &dib0700_usb_id_table[PINNACLE_EXPRESSCARD_320CX], NULL }, { NULL }, }, { "Terratec Cinergy HT Express", { &dib0700_usb_id_table[TERRATEC_CINERGY_HT_EXPRESS], NULL }, { NULL }, }, { "Gigabyte U8000-RH", { &dib0700_usb_id_table[GIGABYTE_U8000], NULL }, { NULL }, }, { "YUAN High-Tech STK7700PH", { &dib0700_usb_id_table[YUAN_STK7700PH], NULL }, { NULL }, }, { "Asus My Cinema-U3000Hybrid", { &dib0700_usb_id_table[ASUS_U3000H], NULL }, { NULL }, }, { "YUAN High-Tech MC770", { &dib0700_usb_id_table[YUAN_MC770], NULL }, { NULL }, }, { "Leadtek WinFast DTV Dongle H", { &dib0700_usb_id_table[LEADTEK_WINFAST_DTV_DONGLE_H], NULL }, { NULL }, }, { "YUAN High-Tech STK7700D", { &dib0700_usb_id_table[YUAN_STK7700D], NULL }, { NULL }, }, { "Hama DVB=T Hybrid USB Stick", { &dib0700_usb_id_table[HAMA_DVBT_HYBRID], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = s5h1411_frontend_attach, .tuner_attach = xc5000_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 2, .devices = { { "Pinnacle PCTV HD Pro USB Stick", { &dib0700_usb_id_table[PINNACLE_PCTV801E], NULL }, { NULL }, }, { "Pinnacle PCTV HD USB Stick", { &dib0700_usb_id_table[PINNACLE_PCTV801E_SE], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = lgdt3305_frontend_attach, .tuner_attach = mxl5007t_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 2, .devices = { { "Hauppauge ATSC MiniCard (B200)", { &dib0700_usb_id_table[HAUPPAUGE_TIGER_ATSC], NULL }, { NULL }, }, { "Hauppauge ATSC MiniCard (B210)", { &dib0700_usb_id_table[HAUPPAUGE_TIGER_ATSC_B210], NULL }, { NULL }, }, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7770p_frontend_attach, .tuner_attach = dib7770p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 4, 
.devices = { { "DiBcom STK7770P reference design", { &dib0700_usb_id_table[DIBCOM_STK7770P], NULL }, { NULL }, }, { "Terratec Cinergy T USB XXS (HD)/ T3", { &dib0700_usb_id_table[TERRATEC_CINERGY_T_XXS], &dib0700_usb_id_table[TERRATEC_T3], &dib0700_usb_id_table[TERRATEC_CINERGY_T_XXS_2], NULL}, { NULL }, }, { "TechniSat AirStar TeleStick 2", { &dib0700_usb_id_table[TECHNISAT_AIRSTAR_TELESTICK_2], NULL }, { NULL }, }, { "Medion CTX1921 DVB-T USB", { &dib0700_usb_id_table[MEDION_CREATIX_CTX1921], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk807x_frontend_attach, .tuner_attach = dib807x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 3, .devices = { { "DiBcom STK807xP reference design", { &dib0700_usb_id_table[DIBCOM_STK807XP], NULL }, { NULL }, }, { "Prolink Pixelview SBTVD", { &dib0700_usb_id_table[PIXELVIEW_SBTVD], NULL }, { NULL }, }, { "EvolutePC TVWay+", { &dib0700_usb_id_table[EVOLUTEPC_TVWAY_PLUS], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk807xpvr_frontend_attach0, .tuner_attach = dib807x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk807xpvr_frontend_attach1, .tuner_attach = dib807x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom STK807xPVR reference design", { &dib0700_usb_id_table[DIBCOM_STK807XPVR], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk809x_frontend_attach, .tuner_attach = dib809x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom STK8096GP reference design", { 
&dib0700_usb_id_table[DIBCOM_STK8096GP], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = dib90x0_pid_filter, .pid_filter_ctrl = dib90x0_pid_filter_ctrl, .frontend_attach = stk9090m_frontend_attach, .tuner_attach = dib9090_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom STK9090M reference design", { &dib0700_usb_id_table[DIBCOM_NIM9090M], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = nim8096md_frontend_attach, .tuner_attach = nim8096md_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom NIM8096MD reference design", { &dib0700_usb_id_table[DIBCOM_NIM8096MD], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = dib90x0_pid_filter, .pid_filter_ctrl = dib90x0_pid_filter_ctrl, .frontend_attach = nim9090md_frontend_attach, .tuner_attach = nim9090md_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom NIM9090MD reference design", { &dib0700_usb_id_table[DIBCOM_NIM9090MD], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = nim7090_frontend_attach, .tuner_attach = nim7090_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom NIM7090 reference design", { &dib0700_usb_id_table[DIBCOM_NIM7090], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, 
.module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = tfe7090pvr_frontend0_attach, .tuner_attach = tfe7090pvr_tuner0_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = tfe7090pvr_frontend1_attach, .tuner_attach = tfe7090pvr_tuner1_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom TFE7090PVR reference design", { &dib0700_usb_id_table[DIBCOM_TFE7090PVR], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = pctv340e_frontend_attach, .tuner_attach = xc4000_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 2, .devices = { { "Pinnacle PCTV 340e HD Pro USB Stick", { &dib0700_usb_id_table[PINNACLE_PCTV340E], NULL }, { NULL }, }, { "Pinnacle PCTV Hybrid Stick Solo", { &dib0700_usb_id_table[PINNACLE_PCTV340E_SE], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = tfe7790p_frontend_attach, .tuner_attach = tfe7790p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), } }, }, }, .num_device_descs = 1, .devices = { { "DiBcom TFE7790P reference design", { &dib0700_usb_id_table[DIBCOM_TFE7790P], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = tfe8096p_frontend_attach, .tuner_attach = tfe8096p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), } }, }, }, .num_device_descs = 1, .devices = { { "DiBcom TFE8096P reference design", { 
&dib0700_usb_id_table[DIBCOM_TFE8096P], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk809x_frontend_attach, .tuner_attach = dib809x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), } }, .size_of_priv = sizeof(struct dib0700_adapter_state), }, { .num_frontends = 1, .fe = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk809x_frontend1_attach, .tuner_attach = dib809x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), } }, .size_of_priv = sizeof(struct dib0700_adapter_state), }, }, .num_device_descs = 1, .devices = { { "DiBcom STK8096-PVR reference design", { &dib0700_usb_id_table[PCTV_DIBCOM_STK8096PVR], &dib0700_usb_id_table[DIBCOM_STK8096PVR], NULL}, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = xbox_one_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x82), } }, }, }, .num_device_descs = 1, .devices = { { "Microsoft Xbox One Digital TV Tuner", { &dib0700_usb_id_table[MICROSOFT_XBOX_ONE_TUNER], NULL }, { NULL }, }, }, }, }; int dib0700_device_count = ARRAY_SIZE(dib0700_devices); |
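/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a dvb-usb probe routine typically consumes a properties table such as
 * dib0700_devices[].  The dvb_usb_device_init() call and the adapter_nr
 * module-option array are assumed from the dvb-usb framework; the real
 * driver's probe in dib0700_core.c may differ in detail.
 */
#if 0	/* example only -- not compiled */
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static int example_dib0700_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
{
	int i;

	/* Try each properties entry; the first match claims the device. */
	for (i = 0; i < dib0700_device_count; i++)
		if (dvb_usb_device_init(intf, &dib0700_devices[i],
					THIS_MODULE, NULL, adapter_nr) == 0)
			return 0;

	return -ENODEV;
}
#endif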
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

#include "internal.h"

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
*/ static void pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page = pages[pcpu_page_idx(cpu, i)]; if (page) __free_page(page); } } } /** * pcpu_alloc_pages - allocates pages for @chunk * @chunk: target chunk * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() * @page_start: page index of the first page to be allocated * @page_end: page index of the last page to be allocated + 1 * @gfp: allocation flags passed to the underlying allocator * * Allocate pages [@page_start,@page_end) into @pages for all units. * The allocation is for @chunk. Percpu core doesn't care about the * content of @pages and will pass it verbatim to pcpu_map_pages(). */ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp) { unsigned int cpu, tcpu; int i; gfp |= __GFP_HIGHMEM; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); if (!*pagep) goto err; } } return 0; err: while (--i >= page_start) __free_page(pages[pcpu_page_idx(cpu, i)]); for_each_possible_cpu(tcpu) { if (tcpu == cpu) break; for (i = page_start; i < page_end; i++) __free_page(pages[pcpu_page_idx(tcpu, i)]); } return -ENOMEM; } /** * pcpu_pre_unmap_flush - flush cache prior to unmapping * @chunk: chunk the regions to be flushed belongs to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages in [@page_start,@page_end) of @chunk are about to be * unmapped. Flush cache. As each flushing trial can be very * expensive, issue flush on the whole region at once rather than * doing it for each cpu. This could be an overkill but is more * scalable. */ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vunmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) { vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); } /** * pcpu_unmap_pages - unmap pages out of a pcpu_chunk * @chunk: chunk of interest * @pages: pages array which can be used to pass information to free * @page_start: page index of the first page to unmap * @page_end: page index of the last page to unmap + 1 * * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. * Corresponding elements in @pages were cleared by the caller and can * be used to carry information to pcpu_free_pages() which will be * called after all unmaps are finished. The caller should call * proper pre/post flush functions. 
*/ static void pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page; page = pcpu_chunk_page(chunk, cpu, i); WARN_ON(!page); pages[pcpu_page_idx(cpu, i)] = page; } __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), page_end - page_start); } } /** * pcpu_post_unmap_tlb_flush - flush TLB after unmapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush * TLB for the regions. This can be skipped if the area is to be * returned to vmalloc as vmalloc will handle TLB flushing lazily. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_tlb_kernel_range( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static int __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) { return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), PAGE_KERNEL, pages, PAGE_SHIFT); } /** * pcpu_map_pages - map pages into a pcpu_chunk * @chunk: chunk of interest * @pages: pages array containing pages to be mapped * @page_start: page index of the first page to map * @page_end: page index of the last page to map + 1 * * For each cpu, map pages [@page_start,@page_end) into @chunk. The * caller is responsible for calling pcpu_post_map_flush() after all * mappings are complete. * * This function is responsible for setting up whatever is necessary for * reverse lookup (addr -> chunk). */ static int pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu, tcpu; int i, err; for_each_possible_cpu(cpu) { err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), &pages[pcpu_page_idx(cpu, page_start)], page_end - page_start); if (err < 0) goto err; for (i = page_start; i < page_end; i++) pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], chunk); } return 0; err: for_each_possible_cpu(tcpu) { __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), page_end - page_start); if (tcpu == cpu) break; } pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); return err; } /** * pcpu_post_map_flush - flush cache after mapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been mapped. Flush * cache. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_map_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } /** * pcpu_populate_chunk - populate and map an area of a pcpu_chunk * @chunk: chunk of interest * @page_start: the start page * @page_end: the end page * @gfp: allocation flags passed to the underlying memory allocator * * For each cpu, populate and map pages [@page_start,@page_end) into * @chunk. * * CONTEXT: * pcpu_alloc_mutex, does GFP_KERNEL allocation. 
*/ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp) { struct page **pages; pages = pcpu_get_pages(); if (!pages) return -ENOMEM; if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) return -ENOMEM; if (pcpu_map_pages(chunk, pages, page_start, page_end)) { pcpu_free_pages(chunk, pages, page_start, page_end); return -ENOMEM; } pcpu_post_map_flush(chunk, page_start, page_end); return 0; } /** * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk * @chunk: chunk to depopulate * @page_start: the start page * @page_end: the end page * * For each cpu, depopulate and unmap pages [@page_start,@page_end) * from @chunk. * * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the * region back to vmalloc() which will lazily flush the tlb. * * CONTEXT: * pcpu_alloc_mutex. */ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end) { struct page **pages; /* * If control reaches here, there must have been at least one * successful population attempt so the temp pages array must * be available now. */ pages = pcpu_get_pages(); BUG_ON(!pages); /* unmap and free */ pcpu_pre_unmap_flush(chunk, page_start, page_end); pcpu_unmap_pages(chunk, pages, page_start, page_end); pcpu_free_pages(chunk, pages, page_start, page_end); } static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) { struct pcpu_chunk *chunk; struct vm_struct **vms; chunk = pcpu_alloc_chunk(gfp); if (!chunk) return NULL; vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, pcpu_nr_groups, pcpu_atom_size); if (!vms) { pcpu_free_chunk(chunk); return NULL; } chunk->data = vms; chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0]; pcpu_stats_chunk_alloc(); trace_percpu_create_chunk(chunk->base_addr); return chunk; } static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; pcpu_stats_chunk_dealloc(); trace_percpu_destroy_chunk(chunk->base_addr); if (chunk->data) pcpu_free_vm_areas(chunk->data, pcpu_nr_groups); pcpu_free_chunk(chunk); } static struct page *pcpu_addr_to_page(void *addr) { return vmalloc_to_page(addr); } static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) { /* no extra restriction */ return 0; } /** * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim * @chunk: chunk of interest * * This is the entry point for percpu reclaim. If a chunk qualifies, it is then * isolated and managed in separate lists at the back of pcpu_slot: sidelined * and to_depopulate respectively. The to_depopulate list holds chunks slated * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once * they are on this list. Once depopulated, they are moved onto the sidelined * list which enables them to be pulled back in for allocation if no other chunk * can suffice the allocation. */ static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk) { /* do not reclaim either the first chunk or reserved chunk */ if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk) return false; /* * If it is isolated, it may be on the sidelined list so move it back to * the to_depopulate list. If we hit at least 1/4 pages empty pages AND * there is no system-wide shortage of empty pages aside from this * chunk, move it to the to_depopulate list. */ return ((chunk->isolated && chunk->nr_empty_pop_pages) || (pcpu_nr_empty_pop_pages > (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) && chunk->nr_empty_pop_pages >= chunk->nr_pages / 4)); } |
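/*
 * Editor's note (not part of the original file): a minimal sketch of the
 * indexing convention assumed above.  The temp array returned by
 * pcpu_get_pages() is a flat array of pcpu_nr_units * pcpu_unit_pages
 * entries, and pcpu_page_idx(cpu, i) is assumed to map a (unit, page) pair
 * to a row-major slot, i.e. unit * pcpu_unit_pages + i.  For example, with
 * 4 units of 8 pages each the array has 32 slots, and page 3 of unit 2 sits
 * in slot 2 * 8 + 3 = 19.
 */
#if 0	/* example only -- not compiled */
static int example_page_idx(int unit, int page_idx, int unit_pages)
{
	/* one contiguous run of unit_pages slots per unit */
	return unit * unit_pages + page_idx;
}
#endif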
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_STR_HASH_H
#define _BCACHEFS_STR_HASH_H

#include "btree_iter.h"
#include "btree_update.h"
#include "checksum.h"
#include "error.h"
#include "inode.h"
#include "siphash.h"
#include "subvolume.h"
#include "super.h"

#include <linux/crc32c.h>
#include <crypto/sha2.h>

static inline enum bch_str_hash_type
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
{
	switch (opt) {
	case BCH_STR_HASH_OPT_crc32c:
		return BCH_STR_HASH_crc32c;
	case BCH_STR_HASH_OPT_crc64:
		return BCH_STR_HASH_crc64;
	case BCH_STR_HASH_OPT_siphash:
		return c->sb.features & (1ULL << BCH_FEATURE_new_siphash)
			? BCH_STR_HASH_siphash
			: BCH_STR_HASH_siphash_old;
	default:
		BUG();
	}
}

struct bch_hash_info {
	u8			type;
	struct unicode_map	*cf_encoding;
	/*
	 * For crc32 or crc64 string hashes the first key value of
	 * the siphash_key (k0) is used as the key.
	 */
	SIPHASH_KEY		siphash_key;
};

static inline struct bch_hash_info
bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
{
	struct bch_hash_info info = {
		.type		= INODE_STR_HASH(bi),
#ifdef CONFIG_UNICODE
		.cf_encoding	= bch2_inode_casefold(c, bi) ?
c->cf_encoding : NULL, #endif .siphash_key = { .k0 = bi->bi_hash_seed } }; if (unlikely(info.type == BCH_STR_HASH_siphash_old)) { u8 digest[SHA256_DIGEST_SIZE]; sha256((const u8 *)&bi->bi_hash_seed, sizeof(bi->bi_hash_seed), digest); memcpy(&info.siphash_key, digest, sizeof(info.siphash_key)); } return info; } struct bch_str_hash_ctx { union { u32 crc32c; u64 crc64; SIPHASH_CTX siphash; }; }; static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info) { switch (info->type) { case BCH_STR_HASH_crc32c: ctx->crc32c = crc32c(~0, &info->siphash_key.k0, sizeof(info->siphash_key.k0)); break; case BCH_STR_HASH_crc64: ctx->crc64 = crc64_be(~0, &info->siphash_key.k0, sizeof(info->siphash_key.k0)); break; case BCH_STR_HASH_siphash_old: case BCH_STR_HASH_siphash: SipHash24_Init(&ctx->siphash, &info->siphash_key); break; default: BUG(); } } static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info, const void *data, size_t len) { switch (info->type) { case BCH_STR_HASH_crc32c: ctx->crc32c = crc32c(ctx->crc32c, data, len); break; case BCH_STR_HASH_crc64: ctx->crc64 = crc64_be(ctx->crc64, data, len); break; case BCH_STR_HASH_siphash_old: case BCH_STR_HASH_siphash: SipHash24_Update(&ctx->siphash, data, len); break; default: BUG(); } } static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info) { switch (info->type) { case BCH_STR_HASH_crc32c: return ctx->crc32c; case BCH_STR_HASH_crc64: return ctx->crc64 >> 1; case BCH_STR_HASH_siphash_old: case BCH_STR_HASH_siphash: return SipHash24_End(&ctx->siphash) >> 1; default: BUG(); } } struct bch_hash_desc { enum btree_id btree_id; u8 key_type; u64 (*hash_key)(const struct bch_hash_info *, const void *); u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c); bool (*cmp_key)(struct bkey_s_c, const void *); bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c); bool (*is_visible)(subvol_inum inum, struct bkey_s_c); }; static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, struct bkey_s_c k) { return k.k->type == desc.key_type && (!desc.is_visible || !inum.inum || desc.is_visible(inum, k)); } static __always_inline struct bkey_s_c bch2_hash_lookup_in_snapshot(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key, enum btree_iter_update_trigger_flags flags, u32 snapshot) { struct bkey_s_c k; int ret; for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), POS(inum.inum, U64_MAX), BTREE_ITER_slots|flags, k, ret) { if (is_visible_key(desc, inum, k)) { if (!desc.cmp_key(k, key)) return k; } else if (k.k->type == KEY_TYPE_hash_whiteout) { ; } else { /* hole, not found */ break; } } bch2_trans_iter_exit(trans, iter); return bkey_s_c_err(ret ?: -BCH_ERR_ENOENT_str_hash_lookup); } static __always_inline struct bkey_s_c bch2_hash_lookup(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key, enum btree_iter_update_trigger_flags flags) { u32 snapshot; int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); if (ret) return bkey_s_c_err(ret); return bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot); } static __always_inline int bch2_hash_hole(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, 
const struct bch_hash_info *info, subvol_inum inum, const void *key) { struct bkey_s_c k; u32 snapshot; int ret; ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); if (ret) return ret; for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), POS(inum.inum, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent, k, ret) if (!is_visible_key(desc, inum, k)) return 0; bch2_trans_iter_exit(trans, iter); return ret ?: -BCH_ERR_ENOSPC_str_hash_create; } static __always_inline int bch2_hash_needs_whiteout(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *start) { struct btree_iter iter; struct bkey_s_c k; int ret; bch2_trans_copy_iter(trans, &iter, start); bch2_btree_iter_advance(trans, &iter); for_each_btree_key_continue_norestart(trans, iter, BTREE_ITER_slots, k, ret) { if (k.k->type != desc.key_type && k.k->type != KEY_TYPE_hash_whiteout) break; if (k.k->type == desc.key_type && desc.hash_bkey(info, k) <= start->pos.offset) { ret = 1; break; } } bch2_trans_iter_exit(trans, &iter); return ret; } static __always_inline struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans, struct btree_iter *iter, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, u32 snapshot, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { struct btree_iter slot = {}; struct bkey_s_c k; bool found = false; int ret; for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(insert->k.p.inode, desc.hash_bkey(info, bkey_i_to_s_c(insert)), snapshot), POS(insert->k.p.inode, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent|flags, k, ret) { if (is_visible_key(desc, inum, k)) { if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert))) goto found; /* hash collision: */ continue; } if (!slot.path && !(flags & STR_HASH_must_replace)) bch2_trans_copy_iter(trans, &slot, iter); if (k.k->type != KEY_TYPE_hash_whiteout) goto not_found; } if (!ret) ret = -BCH_ERR_ENOSPC_str_hash_create; out: bch2_trans_iter_exit(trans, &slot); bch2_trans_iter_exit(trans, iter); return ret ? 
bkey_s_c_err(ret) : bkey_s_c_null; found: found = true; not_found: if (found && (flags & STR_HASH_must_create)) { bch2_trans_iter_exit(trans, &slot); return k; } else if (!found && (flags & STR_HASH_must_replace)) { ret = -BCH_ERR_ENOENT_str_hash_set_must_replace; } else { if (!found && slot.path) swap(*iter, slot); insert->k.p = iter->pos; ret = bch2_trans_update(trans, iter, insert, flags); } goto out; } static __always_inline int bch2_hash_set_in_snapshot(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, u32 snapshot, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { struct btree_iter iter; struct bkey_s_c k = bch2_hash_set_or_get_in_snapshot(trans, &iter, desc, info, inum, snapshot, insert, flags); int ret = bkey_err(k); if (ret) return ret; if (k.k) { bch2_trans_iter_exit(trans, &iter); return -BCH_ERR_EEXIST_str_hash_set; } return 0; } static __always_inline int bch2_hash_set(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { insert->k.p.inode = inum.inum; u32 snapshot; return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?: bch2_hash_set_in_snapshot(trans, desc, info, inum, snapshot, insert, flags); } static __always_inline int bch2_hash_delete_at(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *iter, enum btree_iter_update_trigger_flags flags) { struct bkey_i *delete; int ret; delete = bch2_trans_kmalloc(trans, sizeof(*delete)); ret = PTR_ERR_OR_ZERO(delete); if (ret) return ret; ret = bch2_hash_needs_whiteout(trans, desc, info, iter); if (ret < 0) return ret; bkey_init(&delete->k); delete->k.p = iter->pos; delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted; return bch2_trans_update(trans, iter, delete, flags); } static __always_inline int bch2_hash_delete(struct btree_trans *trans, const struct bch_hash_desc desc, const struct bch_hash_info *info, subvol_inum inum, const void *key) { struct btree_iter iter; struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key, BTREE_ITER_intent); int ret = bkey_err(k); if (ret) return ret; ret = bch2_hash_delete_at(trans, desc, info, &iter, 0); bch2_trans_iter_exit(trans, &iter); return ret; } struct snapshots_seen; int __bch2_str_hash_check_key(struct btree_trans *, struct snapshots_seen *, const struct bch_hash_desc *, struct bch_hash_info *, struct btree_iter *, struct bkey_s_c); static inline int bch2_str_hash_check_key(struct btree_trans *trans, struct snapshots_seen *s, const struct bch_hash_desc *desc, struct bch_hash_info *hash_info, struct btree_iter *k_iter, struct bkey_s_c hash_k) { if (hash_k.k->type != desc->key_type) return 0; if (likely(desc->hash_bkey(hash_info, hash_k) == hash_k.k->p.offset)) return 0; return __bch2_str_hash_check_key(trans, s, desc, hash_info, k_iter, hash_k); } #endif /* _BCACHEFS_STR_HASH_H */ |
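/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how a user of this API wires up a bch_hash_desc and performs a lookup
 * inside a btree transaction.  The example_* names and the btree/key types
 * chosen here are hypothetical; real users such as dirents and xattrs
 * provide their own descriptors and comparison callbacks.
 */
#if 0	/* example only -- not compiled */
static u64 example_hash_key(const struct bch_hash_info *info, const void *key)
{
	const struct qstr *name = key;
	struct bch_str_hash_ctx ctx;

	bch2_str_hash_init(&ctx, info);
	bch2_str_hash_update(&ctx, info, name->name, name->len);
	return bch2_str_hash_end(&ctx, info);
}

static const struct bch_hash_desc example_hash_desc = {
	.btree_id	= BTREE_ID_dirents,	/* hypothetical choice */
	.key_type	= KEY_TYPE_dirent,	/* hypothetical choice */
	.hash_key	= example_hash_key,
	/* .hash_bkey, .cmp_key and .cmp_bkey omitted for brevity */
};

static int example_lookup(struct btree_trans *trans, subvol_inum dir,
			  const struct bch_hash_info *info,
			  const struct qstr *name)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, example_hash_desc,
					     info, dir, name, 0);
	int ret = bkey_err(k);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
#endif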
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS client file system
 *
 * Copyright (C) 2002,5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/proc_fs.h>
#define CREATE_TRACE_POINTS
#include "internal.h"

MODULE_DESCRIPTION("AFS Client File System");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

unsigned afs_debug;
module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "AFS debugging mask");

static char *rootcell;
module_param(rootcell, charp, 0);
MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");

struct workqueue_struct *afs_wq;
static struct proc_dir_entry *afs_proc_symlink;

#if defined(CONFIG_ALPHA)
const char afs_init_sysname[] = "alpha_linux26";
#elif defined(CONFIG_X86_64)
const char afs_init_sysname[] = "amd64_linux26";
#elif defined(CONFIG_ARM)
const char afs_init_sysname[] = "arm_linux26";
#elif defined(CONFIG_ARM64)
const char afs_init_sysname[] = "aarch64_linux26";
#elif defined(CONFIG_X86_32)
const char afs_init_sysname[] = "i386_linux26";
#elif defined(CONFIG_PPC64)
const char afs_init_sysname[] = "ppc64_linux26";
#elif defined(CONFIG_PPC32)
const char afs_init_sysname[] = "ppc_linux26";
#elif defined(CONFIG_S390)
#ifdef CONFIG_64BIT
const char afs_init_sysname[] = "s390x_linux26";
#else
const char afs_init_sysname[] = "s390_linux26";
#endif
#elif defined(CONFIG_SPARC64)
const char afs_init_sysname[] = "sparc64_linux26";
#elif defined(CONFIG_SPARC32)
const char afs_init_sysname[] = "sparc_linux26";
#else
const char afs_init_sysname[] = "unknown_linux26";
#endif

/*
 * Initialise an AFS network namespace record.
*/ static int __net_init afs_net_init(struct net *net_ns) { struct afs_sysnames *sysnames; struct afs_net *net = afs_net(net_ns); int ret; net->net = net_ns; net->live = true; generate_random_uuid((unsigned char *)&net->uuid); INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation); mutex_init(&net->socket_mutex); net->cells = RB_ROOT; idr_init(&net->cells_dyn_ino); init_rwsem(&net->cells_lock); mutex_init(&net->cells_alias_lock); mutex_init(&net->proc_cells_lock); INIT_HLIST_HEAD(&net->proc_cells); seqlock_init(&net->fs_lock); INIT_LIST_HEAD(&net->fs_probe_fast); INIT_LIST_HEAD(&net->fs_probe_slow); INIT_HLIST_HEAD(&net->fs_proc); INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher); timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0); atomic_set(&net->servers_outstanding, 1); ret = -ENOMEM; sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); if (!sysnames) goto error_sysnames; sysnames->subs[0] = (char *)&afs_init_sysname; sysnames->nr = 1; refcount_set(&sysnames->usage, 1); net->sysnames = sysnames; rwlock_init(&net->sysnames_lock); /* Register the /proc stuff */ ret = afs_proc_init(net); if (ret < 0) goto error_proc; /* Initialise the cell DB */ ret = afs_cell_init(net, rootcell); if (ret < 0) goto error_cell_init; /* Create the RxRPC transport */ ret = afs_open_socket(net); if (ret < 0) goto error_open_socket; return 0; error_open_socket: net->live = false; afs_fs_probe_cleanup(net); afs_cell_purge(net); afs_wait_for_servers(net); error_cell_init: net->live = false; afs_proc_cleanup(net); error_proc: afs_put_sysnames(net->sysnames); error_sysnames: idr_destroy(&net->cells_dyn_ino); net->live = false; return ret; } /* * Clean up and destroy an AFS network namespace record. */ static void __net_exit afs_net_exit(struct net *net_ns) { struct afs_net *net = afs_net(net_ns); net->live = false; afs_fs_probe_cleanup(net); afs_cell_purge(net); afs_wait_for_servers(net); afs_close_socket(net); afs_proc_cleanup(net); afs_put_sysnames(net->sysnames); idr_destroy(&net->cells_dyn_ino); kfree_rcu(rcu_access_pointer(net->address_prefs), rcu); } static struct pernet_operations afs_net_ops = { .init = afs_net_init, .exit = afs_net_exit, .id = &afs_net_id, .size = sizeof(struct afs_net), }; /* * initialise the AFS client FS module */ static int __init afs_init(void) { int ret = -ENOMEM; printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); afs_wq = alloc_workqueue("afs", 0, 0); if (!afs_wq) goto error_afs_wq; afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!afs_async_calls) goto error_async; afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0); if (!afs_lock_manager) goto error_lockmgr; ret = register_pernet_device(&afs_net_ops); if (ret < 0) goto error_net; /* register the filesystems */ ret = afs_fs_init(); if (ret < 0) goto error_fs; afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs"); if (!afs_proc_symlink) { ret = -ENOMEM; goto error_proc; } return ret; error_proc: afs_fs_exit(); error_fs: unregister_pernet_device(&afs_net_ops); error_net: destroy_workqueue(afs_lock_manager); error_lockmgr: destroy_workqueue(afs_async_calls); error_async: destroy_workqueue(afs_wq); error_afs_wq: rcu_barrier(); printk(KERN_ERR "kAFS: failed to register: %d\n", ret); return ret; } /* XXX late_initcall is kludgy, but the only alternative seems to create * a transport upon the first mount, which is worse. Or is it? 
*/ late_initcall(afs_init); /* must be called after net/ to create socket */ /* * clean up on module removal */ static void __exit afs_exit(void) { printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); proc_remove(afs_proc_symlink); afs_fs_exit(); unregister_pernet_device(&afs_net_ops); destroy_workqueue(afs_lock_manager); destroy_workqueue(afs_async_calls); destroy_workqueue(afs_wq); afs_clean_up_permit_cache(); rcu_barrier(); } module_exit(afs_exit); |
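/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the goto-based error-unwinding idiom used by afs_init() and afs_net_init()
 * above, reduced to a minimal form.  Each failure branch jumps to a label
 * that undoes only the steps that have already succeeded, in reverse order.
 * The example_* names are hypothetical stand-ins.
 */
#if 0	/* example only -- not compiled */
static struct workqueue_struct *example_wq;
static struct pernet_operations example_net_ops;

static int __init example_init(void)
{
	int ret = -ENOMEM;

	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		goto error_wq;

	ret = register_pernet_device(&example_net_ops);
	if (ret < 0)
		goto error_net;

	return 0;

error_net:
	destroy_workqueue(example_wq);
error_wq:
	return ret;
}
#endif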
910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2006 * NTT (Nippon Telegraph and Telephone Corporation). */ /* * Algorithm Specification * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ #include <crypto/algapi.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/unaligned.h> static const u32 camellia_sp1110[256] = { 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00, 0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500, 0xe4e4e400, 0x85858500, 0x57575700, 0x35353500, 0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100, 0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300, 0x45454500, 0x19191900, 0xa5a5a500, 0x21212100, 0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00, 0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00, 0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00, 0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00, 0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00, 0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00, 0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00, 0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00, 0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600, 0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00, 0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600, 0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00, 0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000, 0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900, 0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200, 0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500, 0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100, 0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700, 0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100, 0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00, 0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600, 0x53535300, 0x18181800, 0xf2f2f200, 0x22222200, 0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200, 0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100, 0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800, 0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000, 0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00, 0xa1a1a100, 0x89898900, 0x62626200, 0x97979700, 0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500, 0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200, 0x10101000, 0xc4c4c400, 0x00000000, 0x48484800, 0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00, 0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00, 0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400, 0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200, 0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300, 0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300, 0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200, 0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600, 0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00, 0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00, 0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00, 0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00, 0x9f9f9f00, 
0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00, 0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600, 0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900, 0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00, 0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00, 0xd4d4d400, 0x25252500, 0xababab00, 0x42424200, 0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00, 0x72727200, 0x07070700, 0xb9b9b900, 0x55555500, 0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00, 0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800, 0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400, 0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00, 0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100, 0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400, 0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00, }; static const u32 camellia_sp0222[256] = { 0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9, 0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb, 0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a, 0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282, 0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727, 0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242, 0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c, 0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b, 0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f, 0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d, 0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe, 0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434, 0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595, 0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a, 0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad, 0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a, 0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc, 0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a, 0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040, 0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333, 0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585, 0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a, 0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262, 0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf, 0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2, 0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838, 0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c, 0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444, 0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565, 0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323, 0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151, 0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0, 0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa, 0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f, 0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b, 0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5, 0x00202020, 0x00898989, 0x00000000, 0x00909090, 0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7, 0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5, 0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929, 0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404, 0x009b9b9b, 0x00949494, 0x00212121, 0x00666666, 0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7, 0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5, 0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c, 0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676, 0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696, 0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c, 0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919, 0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d, 0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d, 0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2, 0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4, 0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575, 0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484, 0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5, 0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa, 0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414, 0x006c6c6c, 
0x00929292, 0x00545454, 0x00d0d0d0, 0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949, 0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6, 0x00777777, 0x00939393, 0x00868686, 0x00838383, 0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9, 0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d, }; static const u32 camellia_sp3033[256] = { 0x38003838, 0x41004141, 0x16001616, 0x76007676, 0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2, 0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a, 0x75007575, 0x06000606, 0x57005757, 0xa000a0a0, 0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9, 0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090, 0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727, 0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede, 0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7, 0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767, 0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf, 0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d, 0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565, 0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e, 0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b, 0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6, 0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333, 0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696, 0x3a003a3a, 0x09000909, 0x95009595, 0x10001010, 0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc, 0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161, 0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282, 0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898, 0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb, 0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0, 0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e, 0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b, 0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111, 0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959, 0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8, 0x12001212, 0x04000404, 0x74007474, 0x54005454, 0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828, 0x55005555, 0x68006868, 0x50005050, 0xbe00bebe, 0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb, 0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca, 0x70007070, 0xff00ffff, 0x32003232, 0x69006969, 0x08000808, 0x62006262, 0x00000000, 0x24002424, 0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded, 0x45004545, 0x81008181, 0x73007373, 0x6d006d6d, 0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a, 0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101, 0xe600e6e6, 0x25002525, 0x48004848, 0x99009999, 0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9, 0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171, 0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313, 0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d, 0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5, 0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717, 0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646, 0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747, 0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b, 0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac, 0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535, 0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d, 0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121, 0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d, 0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa, 0x7c007c7c, 0x77007777, 0x56005656, 0x05000505, 0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434, 0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252, 0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd, 0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0, 0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a, 0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f, }; static const u32 camellia_sp4404[256] = { 0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0, 0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae, 0x23230023, 
0x6b6b006b, 0x45450045, 0xa5a500a5, 0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092, 0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f, 0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b, 0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d, 0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c, 0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0, 0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084, 0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076, 0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004, 0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011, 0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2, 0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a, 0x24240024, 0xe8e800e8, 0x60600060, 0x69690069, 0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062, 0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064, 0x10100010, 0x00000000, 0xa3a300a3, 0x75750075, 0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd, 0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090, 0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf, 0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6, 0x81810081, 0x6f6f006f, 0x13130013, 0x63630063, 0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc, 0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4, 0x78780078, 0x06060006, 0xe7e700e7, 0x71710071, 0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d, 0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac, 0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1, 0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043, 0x15150015, 0xadad00ad, 0x77770077, 0x80800080, 0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5, 0x85850085, 0x35350035, 0x0c0c000c, 0x41410041, 0xefef00ef, 0x93930093, 0x19190019, 0x21210021, 0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd, 0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce, 0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a, 0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d, 0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d, 0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d, 0x12120012, 0x20200020, 0xb1b100b1, 0x99990099, 0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005, 0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7, 0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c, 0x0f0f000f, 0x16160016, 0x18180018, 0x22220022, 0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091, 0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050, 0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097, 0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2, 0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db, 0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094, 0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033, 0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2, 0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b, 0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e, 0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e, 0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059, 0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba, 0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa, 0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a, 0x49490049, 0x68680068, 0x38380038, 0xa4a400a4, 0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1, 0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e, }; #define CAMELLIA_MIN_KEY_SIZE 16 #define CAMELLIA_MAX_KEY_SIZE 32 #define CAMELLIA_BLOCK_SIZE 16 #define CAMELLIA_TABLE_BYTE_LEN 272 /* * NB: L and R below stand for 'left' and 'right' as in written numbers. * That is, in (xxxL,xxxR) pair xxxL holds most significant digits, * _not_ least significant ones! 
*/ /* key constants */ #define CAMELLIA_SIGMA1L (0xA09E667FL) #define CAMELLIA_SIGMA1R (0x3BCC908BL) #define CAMELLIA_SIGMA2L (0xB67AE858L) #define CAMELLIA_SIGMA2R (0x4CAA73B2L) #define CAMELLIA_SIGMA3L (0xC6EF372FL) #define CAMELLIA_SIGMA3R (0xE94F82BEL) #define CAMELLIA_SIGMA4L (0x54FF53A5L) #define CAMELLIA_SIGMA4R (0xF1D36F1CL) #define CAMELLIA_SIGMA5L (0x10E527FAL) #define CAMELLIA_SIGMA5R (0xDE682D1DL) #define CAMELLIA_SIGMA6L (0xB05688C2L) #define CAMELLIA_SIGMA6R (0xB3E6C1FDL) /* * macros */ #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) ({ \ w0 = ll; \ ll = (ll << bits) + (lr >> (32 - bits)); \ lr = (lr << bits) + (rl >> (32 - bits)); \ rl = (rl << bits) + (rr >> (32 - bits)); \ rr = (rr << bits) + (w0 >> (32 - bits)); \ }) #define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) ({ \ w0 = ll; \ w1 = lr; \ ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \ lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \ rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \ rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \ }) #define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) ({ \ il = xl ^ kl; \ ir = xr ^ kr; \ t0 = il >> 16; \ t1 = ir >> 16; \ yl = camellia_sp1110[(u8)(ir)] \ ^ camellia_sp0222[(u8)(t1 >> 8)] \ ^ camellia_sp3033[(u8)(t1)] \ ^ camellia_sp4404[(u8)(ir >> 8)]; \ yr = camellia_sp1110[(u8)(t0 >> 8)] \ ^ camellia_sp0222[(u8)(t0)] \ ^ camellia_sp3033[(u8)(il >> 8)] \ ^ camellia_sp4404[(u8)(il)]; \ yl ^= yr; \ yr = ror32(yr, 8); \ yr ^= yl; \ }) #define SUBKEY_L(INDEX) (subkey[(INDEX)*2]) #define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1]) static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) { u32 dw, tl, tr; u32 kw4l, kw4r; /* absorb kw2 to other subkeys */ /* round 2 */ subL[3] ^= subL[1]; subR[3] ^= subR[1]; /* round 4 */ subL[5] ^= subL[1]; subR[5] ^= subR[1]; /* round 6 */ subL[7] ^= subL[1]; subR[7] ^= subR[1]; subL[1] ^= subR[1] & ~subR[9]; dw = subL[1] & subL[9]; subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ /* round 8 */ subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ subL[13] ^= subL[1]; subR[13] ^= subR[1]; /* round 12 */ subL[15] ^= subL[1]; subR[15] ^= subR[1]; subL[1] ^= subR[1] & ~subR[17]; dw = subL[1] & subL[17]; subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ /* round 14 */ subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ subL[21] ^= subL[1]; subR[21] ^= subR[1]; /* round 18 */ subL[23] ^= subL[1]; subR[23] ^= subR[1]; if (max == 24) { /* kw3 */ subL[24] ^= subL[1]; subR[24] ^= subR[1]; /* absorb kw4 to other subkeys */ kw4l = subL[25]; kw4r = subR[25]; } else { subL[1] ^= subR[1] & ~subR[25]; dw = subL[1] & subL[25]; subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ /* round 20 */ subL[27] ^= subL[1]; subR[27] ^= subR[1]; /* round 22 */ subL[29] ^= subL[1]; subR[29] ^= subR[1]; /* round 24 */ subL[31] ^= subL[1]; subR[31] ^= subR[1]; /* kw3 */ subL[32] ^= subL[1]; subR[32] ^= subR[1]; /* absorb kw4 to other subkeys */ kw4l = subL[33]; kw4r = subR[33]; /* round 23 */ subL[30] ^= kw4l; subR[30] ^= kw4r; /* round 21 */ subL[28] ^= kw4l; subR[28] ^= kw4r; /* round 19 */ subL[26] ^= kw4l; subR[26] ^= kw4r; kw4l ^= kw4r & ~subR[24]; dw = kw4l & subL[24]; kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ } /* round 17 */ subL[22] ^= kw4l; subR[22] ^= kw4r; /* round 15 */ subL[20] ^= kw4l; subR[20] ^= kw4r; /* round 13 */ subL[18] ^= kw4l; subR[18] ^= kw4r; kw4l ^= kw4r & ~subR[16]; dw = kw4l & subL[16]; kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ /* round 11 */ subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ subL[12] ^= 
kw4l; subR[12] ^= kw4r; /* round 7 */ subL[10] ^= kw4l; subR[10] ^= kw4r; kw4l ^= kw4r & ~subR[8]; dw = kw4l & subL[8]; kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ /* round 5 */ subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ subL[4] ^= kw4l; subR[4] ^= kw4r; /* round 1 */ subL[2] ^= kw4l; subR[2] ^= kw4r; /* kw1 */ subL[0] ^= kw4l; subR[0] ^= kw4r; /* key XOR is end of F-function */ SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */ SUBKEY_R(0) = subR[0] ^ subR[2]; SUBKEY_L(2) = subL[3]; /* round 1 */ SUBKEY_R(2) = subR[3]; SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */ SUBKEY_R(3) = subR[2] ^ subR[4]; SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */ SUBKEY_R(4) = subR[3] ^ subR[5]; SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */ SUBKEY_R(5) = subR[4] ^ subR[6]; SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */ SUBKEY_R(6) = subR[5] ^ subR[7]; tl = subL[10] ^ (subR[10] & ~subR[8]); dw = tl & subL[8]; /* FL(kl1) */ tr = subR[10] ^ rol32(dw, 1); SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ SUBKEY_R(7) = subR[6] ^ tr; SUBKEY_L(8) = subL[8]; /* FL(kl1) */ SUBKEY_R(8) = subR[8]; SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */ SUBKEY_R(9) = subR[9]; tl = subL[7] ^ (subR[7] & ~subR[9]); dw = tl & subL[9]; /* FLinv(kl2) */ tr = subR[7] ^ rol32(dw, 1); SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ SUBKEY_R(10) = tr ^ subR[11]; SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ SUBKEY_R(11) = subR[10] ^ subR[12]; SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */ SUBKEY_R(12) = subR[11] ^ subR[13]; SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */ SUBKEY_R(13) = subR[12] ^ subR[14]; SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */ SUBKEY_R(14) = subR[13] ^ subR[15]; tl = subL[18] ^ (subR[18] & ~subR[16]); dw = tl & subL[16]; /* FL(kl3) */ tr = subR[18] ^ rol32(dw, 1); SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ SUBKEY_R(15) = subR[14] ^ tr; SUBKEY_L(16) = subL[16]; /* FL(kl3) */ SUBKEY_R(16) = subR[16]; SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */ SUBKEY_R(17) = subR[17]; tl = subL[15] ^ (subR[15] & ~subR[17]); dw = tl & subL[17]; /* FLinv(kl4) */ tr = subR[15] ^ rol32(dw, 1); SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ SUBKEY_R(18) = tr ^ subR[19]; SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ SUBKEY_R(19) = subR[18] ^ subR[20]; SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */ SUBKEY_R(20) = subR[19] ^ subR[21]; SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */ SUBKEY_R(21) = subR[20] ^ subR[22]; SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */ SUBKEY_R(22) = subR[21] ^ subR[23]; if (max == 24) { SUBKEY_L(23) = subL[22]; /* round 18 */ SUBKEY_R(23) = subR[22]; SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */ SUBKEY_R(24) = subR[24] ^ subR[23]; } else { tl = subL[26] ^ (subR[26] & ~subR[24]); dw = tl & subL[24]; /* FL(kl5) */ tr = subR[26] ^ rol32(dw, 1); SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ SUBKEY_R(23) = subR[22] ^ tr; SUBKEY_L(24) = subL[24]; /* FL(kl5) */ SUBKEY_R(24) = subR[24]; SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */ SUBKEY_R(25) = subR[25]; tl = subL[23] ^ (subR[23] & ~subR[25]); dw = tl & subL[25]; /* FLinv(kl6) */ tr = subR[23] ^ rol32(dw, 1); SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ SUBKEY_R(26) = tr ^ subR[27]; SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ SUBKEY_R(27) = subR[26] ^ subR[28]; SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */ SUBKEY_R(28) = subR[27] ^ subR[29]; SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */ SUBKEY_R(29) = subR[28] ^ subR[30]; SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */ SUBKEY_R(30) = subR[29] ^ subR[31]; SUBKEY_L(31) = subL[30]; /* round 24 
*/ SUBKEY_R(31) = subR[30]; SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */ SUBKEY_R(32) = subR[32] ^ subR[31]; } } static void camellia_setup128(const unsigned char *key, u32 *subkey) { u32 kll, klr, krl, krr; u32 il, ir, t0, t1, w0, w1; u32 subL[26]; u32 subR[26]; /** * k == kll || klr || krl || krr (|| is concatenation) */ kll = get_unaligned_be32(key); klr = get_unaligned_be32(key + 4); krl = get_unaligned_be32(key + 8); krr = get_unaligned_be32(key + 12); /* generate KL dependent subkeys */ /* kw1 */ subL[0] = kll; subR[0] = klr; /* kw2 */ subL[1] = krl; subR[1] = krr; /* rotation left shift 15bit */ ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k3 */ subL[4] = kll; subR[4] = klr; /* k4 */ subL[5] = krl; subR[5] = krr; /* rotation left shift 15+30bit */ ROLDQ(kll, klr, krl, krr, w0, w1, 30); /* k7 */ subL[10] = kll; subR[10] = klr; /* k8 */ subL[11] = krl; subR[11] = krr; /* rotation left shift 15+30+15bit */ ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k10 */ subL[13] = krl; subR[13] = krr; /* rotation left shift 15+30+15+17 bit */ ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* kl3 */ subL[16] = kll; subR[16] = klr; /* kl4 */ subL[17] = krl; subR[17] = krr; /* rotation left shift 15+30+15+17+17 bit */ ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k13 */ subL[18] = kll; subR[18] = klr; /* k14 */ subL[19] = krl; subR[19] = krr; /* rotation left shift 15+30+15+17+17+17 bit */ ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k17 */ subL[22] = kll; subR[22] = klr; /* k18 */ subL[23] = krl; subR[23] = krr; /* generate KA */ kll = subL[0]; klr = subR[0]; krl = subL[1]; krr = subR[1]; CAMELLIA_F(kll, klr, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, w0, w1, il, ir, t0, t1); krl ^= w0; krr ^= w1; CAMELLIA_F(krl, krr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R, kll, klr, il, ir, t0, t1); /* current status == (kll, klr, w0, w1) */ CAMELLIA_F(kll, klr, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R, krl, krr, il, ir, t0, t1); krl ^= w0; krr ^= w1; CAMELLIA_F(krl, krr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R, w0, w1, il, ir, t0, t1); kll ^= w0; klr ^= w1; /* generate KA dependent subkeys */ /* k1, k2 */ subL[2] = kll; subR[2] = klr; subL[3] = krl; subR[3] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k5,k6 */ subL[6] = kll; subR[6] = klr; subL[7] = krl; subR[7] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* kl1, kl2 */ subL[8] = kll; subR[8] = klr; subL[9] = krl; subR[9] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k9 */ subL[12] = kll; subR[12] = klr; ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k11, k12 */ subL[14] = kll; subR[14] = klr; subL[15] = krl; subR[15] = krr; ROLDQo32(kll, klr, krl, krr, w0, w1, 34); /* k15, k16 */ subL[20] = kll; subR[20] = klr; subL[21] = krl; subR[21] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* kw3, kw4 */ subL[24] = kll; subR[24] = klr; subL[25] = krl; subR[25] = krr; camellia_setup_tail(subkey, subL, subR, 24); } static void camellia_setup256(const unsigned char *key, u32 *subkey) { u32 kll, klr, krl, krr; /* left half of key */ u32 krll, krlr, krrl, krrr; /* right half of key */ u32 il, ir, t0, t1, w0, w1; /* temporary variables */ u32 subL[34]; u32 subR[34]; /** * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) * (|| is concatenation) */ kll = get_unaligned_be32(key); klr = get_unaligned_be32(key + 4); krl = get_unaligned_be32(key + 8); krr = get_unaligned_be32(key + 12); krll = get_unaligned_be32(key + 16); krlr = get_unaligned_be32(key + 20); krrl = get_unaligned_be32(key + 24); krrr = get_unaligned_be32(key + 28); /* generate KL dependent subkeys */ /* kw1 */ subL[0] = kll; subR[0] = klr; 
/* kw2 */ subL[1] = krl; subR[1] = krr; ROLDQo32(kll, klr, krl, krr, w0, w1, 45); /* k9 */ subL[12] = kll; subR[12] = klr; /* k10 */ subL[13] = krl; subR[13] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* kl3 */ subL[16] = kll; subR[16] = klr; /* kl4 */ subL[17] = krl; subR[17] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 17); /* k17 */ subL[22] = kll; subR[22] = klr; /* k18 */ subL[23] = krl; subR[23] = krr; ROLDQo32(kll, klr, krl, krr, w0, w1, 34); /* k23 */ subL[30] = kll; subR[30] = klr; /* k24 */ subL[31] = krl; subR[31] = krr; /* generate KR dependent subkeys */ ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); /* k3 */ subL[4] = krll; subR[4] = krlr; /* k4 */ subL[5] = krrl; subR[5] = krrr; ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15); /* kl1 */ subL[8] = krll; subR[8] = krlr; /* kl2 */ subL[9] = krrl; subR[9] = krrr; ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k13 */ subL[18] = krll; subR[18] = krlr; /* k14 */ subL[19] = krrl; subR[19] = krrr; ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); /* k19 */ subL[26] = krll; subR[26] = krlr; /* k20 */ subL[27] = krrl; subR[27] = krrr; ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34); /* generate KA */ kll = subL[0] ^ krll; klr = subR[0] ^ krlr; krl = subL[1] ^ krrl; krr = subR[1] ^ krrr; CAMELLIA_F(kll, klr, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R, w0, w1, il, ir, t0, t1); krl ^= w0; krr ^= w1; CAMELLIA_F(krl, krr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R, kll, klr, il, ir, t0, t1); kll ^= krll; klr ^= krlr; CAMELLIA_F(kll, klr, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R, krl, krr, il, ir, t0, t1); krl ^= w0 ^ krrl; krr ^= w1 ^ krrr; CAMELLIA_F(krl, krr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R, w0, w1, il, ir, t0, t1); kll ^= w0; klr ^= w1; /* generate KB */ krll ^= kll; krlr ^= klr; krrl ^= krl; krrr ^= krr; CAMELLIA_F(krll, krlr, CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R, w0, w1, il, ir, t0, t1); krrl ^= w0; krrr ^= w1; CAMELLIA_F(krrl, krrr, CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R, w0, w1, il, ir, t0, t1); krll ^= w0; krlr ^= w1; /* generate KA dependent subkeys */ ROLDQ(kll, klr, krl, krr, w0, w1, 15); /* k5 */ subL[6] = kll; subR[6] = klr; /* k6 */ subL[7] = krl; subR[7] = krr; ROLDQ(kll, klr, krl, krr, w0, w1, 30); /* k11 */ subL[14] = kll; subR[14] = klr; /* k12 */ subL[15] = krl; subR[15] = krr; /* rotation left shift 32bit */ /* kl5 */ subL[24] = klr; subR[24] = krl; /* kl6 */ subL[25] = krr; subR[25] = kll; /* rotation left shift 49 from k11,k12 -> k21,k22 */ ROLDQo32(kll, klr, krl, krr, w0, w1, 49); /* k21 */ subL[28] = kll; subR[28] = klr; /* k22 */ subL[29] = krl; subR[29] = krr; /* generate KB dependent subkeys */ /* k1 */ subL[2] = krll; subR[2] = krlr; /* k2 */ subL[3] = krrl; subR[3] = krrr; ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k7 */ subL[10] = krll; subR[10] = krlr; /* k8 */ subL[11] = krrl; subR[11] = krrr; ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30); /* k15 */ subL[20] = krll; subR[20] = krlr; /* k16 */ subL[21] = krrl; subR[21] = krrr; ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51); /* kw3 */ subL[32] = krll; subR[32] = krlr; /* kw4 */ subL[33] = krrl; subR[33] = krrr; camellia_setup_tail(subkey, subL, subR, 32); } static void camellia_setup192(const unsigned char *key, u32 *subkey) { unsigned char kk[32]; u32 krll, krlr, krrl, krrr; memcpy(kk, key, 24); memcpy((unsigned char *)&krll, key+16, 4); memcpy((unsigned char *)&krlr, key+20, 4); krrl = ~krll; krrr = ~krlr; memcpy(kk+24, (unsigned char *)&krrl, 4); memcpy(kk+28, (unsigned char *)&krrr, 4); camellia_setup256(kk, subkey); } /* * Encrypt/decrypt */ #define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, 
t1, t2, t3) ({ \ t0 = kll; \ t2 = krr; \ t0 &= ll; \ t2 |= rr; \ rl ^= t2; \ lr ^= rol32(t0, 1); \ t3 = krl; \ t1 = klr; \ t3 &= rl; \ t1 |= lr; \ ll ^= t1; \ rr ^= rol32(t3, 1); \ }) #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) ({ \ yl ^= kl; \ yr ^= kr; \ ir = camellia_sp1110[(u8)xr]; \ il = camellia_sp1110[(u8)(xl >> 24)]; \ ir ^= camellia_sp0222[(u8)(xr >> 24)]; \ il ^= camellia_sp0222[(u8)(xl >> 16)]; \ ir ^= camellia_sp3033[(u8)(xr >> 16)]; \ il ^= camellia_sp3033[(u8)(xl >> 8)]; \ ir ^= camellia_sp4404[(u8)(xr >> 8)]; \ il ^= camellia_sp4404[(u8)xl]; \ ir ^= il; \ yl ^= ir; \ yr ^= ror32(il, 8) ^ ir; \ }) /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) { u32 il, ir, t0, t1; /* temporary variables */ /* pre whitening but absorb kw2 */ io[0] ^= SUBKEY_L(0); io[1] ^= SUBKEY_R(0); /* main iteration */ #define ROUNDS(i) ({ \ CAMELLIA_ROUNDSM(io[0], io[1], \ SUBKEY_L(i + 2), SUBKEY_R(i + 2), \ io[2], io[3], il, ir); \ CAMELLIA_ROUNDSM(io[2], io[3], \ SUBKEY_L(i + 3), SUBKEY_R(i + 3), \ io[0], io[1], il, ir); \ CAMELLIA_ROUNDSM(io[0], io[1], \ SUBKEY_L(i + 4), SUBKEY_R(i + 4), \ io[2], io[3], il, ir); \ CAMELLIA_ROUNDSM(io[2], io[3], \ SUBKEY_L(i + 5), SUBKEY_R(i + 5), \ io[0], io[1], il, ir); \ CAMELLIA_ROUNDSM(io[0], io[1], \ SUBKEY_L(i + 6), SUBKEY_R(i + 6), \ io[2], io[3], il, ir); \ CAMELLIA_ROUNDSM(io[2], io[3], \ SUBKEY_L(i + 7), SUBKEY_R(i + 7), \ io[0], io[1], il, ir); \ }) #define FLS(i) ({ \ CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ SUBKEY_L(i + 0), SUBKEY_R(i + 0), \ SUBKEY_L(i + 1), SUBKEY_R(i + 1), \ t0, t1, il, ir); \ }) ROUNDS(0); FLS(8); ROUNDS(8); FLS(16); ROUNDS(16); if (max == 32) { FLS(24); ROUNDS(24); } #undef ROUNDS #undef FLS /* post whitening but kw4 */ io[2] ^= SUBKEY_L(max); io[3] ^= SUBKEY_R(max); /* NB: io[0],[1] should be swapped with [2],[3] by caller! */ } static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) { u32 il, ir, t0, t1; /* temporary variables */ /* pre whitening but absorb kw2 */ io[0] ^= SUBKEY_L(i); io[1] ^= SUBKEY_R(i); /* main iteration */ #define ROUNDS(i) ({ \ CAMELLIA_ROUNDSM(io[0], io[1], \ SUBKEY_L(i + 7), SUBKEY_R(i + 7), \ io[2], io[3], il, ir); \ CAMELLIA_ROUNDSM(io[2], io[3], \ SUBKEY_L(i + 6), SUBKEY_R(i + 6), \ io[0], io[1], il, ir); \ CAMELLIA_ROUNDSM(io[0], io[1], \ SUBKEY_L(i + 5), SUBKEY_R(i + 5), \ io[2], io[3], il, ir); \ CAMELLIA_ROUNDSM(io[2], io[3], \ SUBKEY_L(i + 4), SUBKEY_R(i + 4), \ io[0], io[1], il, ir); \ CAMELLIA_ROUNDSM(io[0], io[1], \ SUBKEY_L(i + 3), SUBKEY_R(i + 3), \ io[2], io[3], il, ir); \ CAMELLIA_ROUNDSM(io[2], io[3], \ SUBKEY_L(i + 2), SUBKEY_R(i + 2), \ io[0], io[1], il, ir); \ }) #define FLS(i) ({ \ CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ SUBKEY_L(i + 1), SUBKEY_R(i + 1), \ SUBKEY_L(i + 0), SUBKEY_R(i + 0), \ t0, t1, il, ir); \ }) if (i == 32) { ROUNDS(24); FLS(24); } ROUNDS(16); FLS(16); ROUNDS(8); FLS(8); ROUNDS(0); #undef ROUNDS #undef FLS /* post whitening but kw4 */ io[2] ^= SUBKEY_L(0); io[3] ^= SUBKEY_R(0); /* NB: 0,1 should be swapped with 2,3 by caller! 
*/ } struct camellia_ctx { int key_length; u32 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u32)]; }; static int camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); const unsigned char *key = (const unsigned char *)in_key; if (key_len != 16 && key_len != 24 && key_len != 32) return -EINVAL; cctx->key_length = key_len; switch (key_len) { case 16: camellia_setup128(key, cctx->key_table); break; case 24: camellia_setup192(key, cctx->key_table); break; case 32: camellia_setup256(key, cctx->key_table); break; } return 0; } static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); unsigned int max; u32 tmp[4]; tmp[0] = get_unaligned_be32(in); tmp[1] = get_unaligned_be32(in + 4); tmp[2] = get_unaligned_be32(in + 8); tmp[3] = get_unaligned_be32(in + 12); if (cctx->key_length == 16) max = 24; else max = 32; /* for key lengths of 24 and 32 */ camellia_do_encrypt(cctx->key_table, tmp, max); /* do_encrypt returns 0,1 swapped with 2,3 */ put_unaligned_be32(tmp[2], out); put_unaligned_be32(tmp[3], out + 4); put_unaligned_be32(tmp[0], out + 8); put_unaligned_be32(tmp[1], out + 12); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm); unsigned int max; u32 tmp[4]; tmp[0] = get_unaligned_be32(in); tmp[1] = get_unaligned_be32(in + 4); tmp[2] = get_unaligned_be32(in + 8); tmp[3] = get_unaligned_be32(in + 12); if (cctx->key_length == 16) max = 24; else max = 32; /* for key lengths of 24 and 32 */ camellia_do_decrypt(cctx->key_table, tmp, max); /* do_decrypt returns 0,1 swapped with 2,3 */ put_unaligned_be32(tmp[2], out); put_unaligned_be32(tmp[3], out + 4); put_unaligned_be32(tmp[0], out + 8); put_unaligned_be32(tmp[1], out + 12); } static struct crypto_alg camellia_alg = { .cra_name = "camellia", .cra_driver_name = "camellia-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE, .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE, .cia_setkey = camellia_set_key, .cia_encrypt = camellia_encrypt, .cia_decrypt = camellia_decrypt } } }; static int __init camellia_init(void) { return crypto_register_alg(&camellia_alg); } static void __exit camellia_fini(void) { crypto_unregister_alg(&camellia_alg); } subsys_initcall(camellia_init); module_exit(camellia_fini); MODULE_DESCRIPTION("Camellia Cipher Algorithm"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("camellia"); MODULE_ALIAS_CRYPTO("camellia-generic"); |
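/*
 * Hedged usage sketch (not part of the original file): one plausible way for
 * in-kernel code to exercise the "camellia" single-block cipher registered
 * above through the generic crypto_cipher API. The function name and key
 * bytes are illustrative assumptions, not a reference self-test.
 */
#if 0
static int camellia_usage_sketch(void)
{
	static const u8 key[CAMELLIA_MIN_KEY_SIZE] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
		0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
	};
	u8 in[CAMELLIA_BLOCK_SIZE] = { 0 }, out[CAMELLIA_BLOCK_SIZE];
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("camellia", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}
#endif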
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the Forwarding Information Base.
* * Authors: A.N.Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #ifndef _NET_IP_FIB_H #define _NET_IP_FIB_H #include <net/flow.h> #include <linux/seq_file.h> #include <linux/rcupdate.h> #include <net/fib_notifier.h> #include <net/fib_rules.h> #include <net/inet_dscp.h> #include <net/inetpeer.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/refcount.h> #include <linux/ip.h> #include <linux/in_route.h> struct fib_config { u8 fc_dst_len; dscp_t fc_dscp; u8 fc_protocol; u8 fc_scope; u8 fc_type; u8 fc_gw_family; /* 2 bytes unused */ u32 fc_table; __be32 fc_dst; union { __be32 fc_gw4; struct in6_addr fc_gw6; }; int fc_oif; u32 fc_flags; u32 fc_priority; __be32 fc_prefsrc; u32 fc_nh_id; struct nlattr *fc_mx; struct rtnexthop *fc_mp; int fc_mx_len; int fc_mp_len; u32 fc_flow; u32 fc_nlflags; struct nl_info fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; }; struct fib_info; struct rtable; struct fib_nh_exception { struct fib_nh_exception __rcu *fnhe_next; int fnhe_genid; __be32 fnhe_daddr; u32 fnhe_pmtu; bool fnhe_mtu_locked; __be32 fnhe_gw; unsigned long fnhe_expires; struct rtable __rcu *fnhe_rth_input; struct rtable __rcu *fnhe_rth_output; unsigned long fnhe_stamp; struct rcu_head rcu; }; struct fnhe_hash_bucket { struct fib_nh_exception __rcu *chain; }; #define FNHE_HASH_SHIFT 11 #define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT) #define FNHE_RECLAIM_DEPTH 5 struct fib_nh_common { struct net_device *nhc_dev; netdevice_tracker nhc_dev_tracker; int nhc_oif; unsigned char nhc_scope; u8 nhc_family; u8 nhc_gw_family; unsigned char nhc_flags; struct lwtunnel_state *nhc_lwtstate; union { __be32 ipv4; struct in6_addr ipv6; } nhc_gw; int nhc_weight; atomic_t nhc_upper_bound; /* v4 specific, but allows fib6_nh with v4 routes */ struct rtable __rcu * __percpu *nhc_pcpu_rth_output; struct rtable __rcu *nhc_rth_input; struct fnhe_hash_bucket __rcu *nhc_exceptions; }; struct fib_nh { struct fib_nh_common nh_common; struct hlist_node nh_hash; struct fib_info *nh_parent; #ifdef CONFIG_IP_ROUTE_CLASSID __u32 nh_tclassid; #endif __be32 nh_saddr; int nh_saddr_genid; #define fib_nh_family nh_common.nhc_family #define fib_nh_dev nh_common.nhc_dev #define fib_nh_dev_tracker nh_common.nhc_dev_tracker #define fib_nh_oif nh_common.nhc_oif #define fib_nh_flags nh_common.nhc_flags #define fib_nh_lws nh_common.nhc_lwtstate #define fib_nh_scope nh_common.nhc_scope #define fib_nh_gw_family nh_common.nhc_gw_family #define fib_nh_gw4 nh_common.nhc_gw.ipv4 #define fib_nh_gw6 nh_common.nhc_gw.ipv6 #define fib_nh_weight nh_common.nhc_weight #define fib_nh_upper_bound nh_common.nhc_upper_bound }; /* * This structure contains data shared by many of routes. 
*/ struct nexthop; struct fib_info { struct hlist_node fib_hash; struct hlist_node fib_lhash; struct list_head nh_list; struct net *fib_net; refcount_t fib_treeref; refcount_t fib_clntref; unsigned int fib_flags; unsigned char fib_dead; unsigned char fib_protocol; unsigned char fib_scope; unsigned char fib_type; __be32 fib_prefsrc; u32 fib_tb_id; u32 fib_priority; struct dst_metrics *fib_metrics; #define fib_mtu fib_metrics->metrics[RTAX_MTU-1] #define fib_window fib_metrics->metrics[RTAX_WINDOW-1] #define fib_rtt fib_metrics->metrics[RTAX_RTT-1] #define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] int fib_nhs; bool fib_nh_is_v6; bool nh_updated; bool pfsrc_removed; struct nexthop *nh; struct rcu_head rcu; struct fib_nh fib_nh[] __counted_by(fib_nhs); }; int __net_init fib4_semantics_init(struct net *net); void __net_exit fib4_semantics_exit(struct net *net); #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rule; #endif struct fib_table; struct fib_result { __be32 prefix; unsigned char prefixlen; unsigned char nh_sel; unsigned char type; unsigned char scope; u32 tclassid; dscp_t dscp; struct fib_nh_common *nhc; struct fib_info *fi; struct fib_table *table; struct hlist_head *fa_head; }; struct fib_result_nl { __be32 fl_addr; /* To be looked up*/ u32 fl_mark; unsigned char fl_tos; unsigned char fl_scope; unsigned char tb_id_in; unsigned char tb_id; /* Results */ unsigned char prefixlen; unsigned char nh_sel; unsigned char type; unsigned char scope; int err; }; #ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 #else #define FIB_TABLE_HASHSZ 2 #endif __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc, unsigned char scope); __be32 fib_result_prefsrc(struct net *net, struct fib_result *res); #define FIB_RES_NHC(res) ((res).nhc) #define FIB_RES_DEV(res) (FIB_RES_NHC(res)->nhc_dev) #define FIB_RES_OIF(res) (FIB_RES_NHC(res)->nhc_oif) struct fib_rt_info { struct fib_info *fi; u32 tb_id; __be32 dst; int dst_len; dscp_t dscp; u8 type; u8 offload:1, trap:1, offload_failed:1, unused:5; }; struct fib_entry_notifier_info { struct fib_notifier_info info; /* must be first */ u32 dst; int dst_len; struct fib_info *fi; dscp_t dscp; u8 type; u32 tb_id; }; struct fib_nh_notifier_info { struct fib_notifier_info info; /* must be first */ struct fib_nh *fib_nh; }; int call_fib4_notifier(struct notifier_block *nb, enum fib_event_type event_type, struct fib_notifier_info *info); int call_fib4_notifiers(struct net *net, enum fib_event_type event_type, struct fib_notifier_info *info); int __net_init fib4_notifier_init(struct net *net); void __net_exit fib4_notifier_exit(struct net *net); void fib_info_notify_update(struct net *net, struct nl_info *info); int fib_notify(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); struct fib_table { struct hlist_node tb_hlist; u32 tb_id; int tb_num_default; struct rcu_head rcu; unsigned long *tb_data; unsigned long __data[]; }; struct fib_dump_filter { u32 table_id; /* filter_set is an optimization that an entry is set */ bool filter_set; bool dump_routes; bool dump_exceptions; bool rtnl_held; unsigned char protocol; unsigned char rt_type; unsigned int flags; struct net_device *dev; }; int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags); int fib_table_insert(struct net *, struct fib_table *, struct fib_config *, struct netlink_ext_ack *extack); int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, struct netlink_ext_ack *extack); int 
fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb, struct fib_dump_filter *filter); int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); #ifndef CONFIG_IP_MULTIPLE_TABLES #define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) #define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1)) static inline struct fib_table *fib_get_table(struct net *net, u32 id) { struct hlist_node *tb_hlist; struct hlist_head *ptr; ptr = id == RT_TABLE_LOCAL ? &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] : &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]; tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr)); return hlist_entry(tb_hlist, struct fib_table, tb_hlist); } static inline struct fib_table *fib_new_table(struct net *net, u32 id) { return fib_get_table(net, id); } static inline int fib_lookup(struct net *net, const struct flowi4 *flp, struct fib_result *res, unsigned int flags) { struct fib_table *tb; int err = -ENETUNREACH; rcu_read_lock(); tb = fib_get_table(net, RT_TABLE_MAIN); if (tb) err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF); if (err == -EAGAIN) err = -ENETUNREACH; rcu_read_unlock(); return err; } static inline bool fib4_has_custom_rules(const struct net *net) { return false; } static inline bool fib4_rule_default(const struct fib_rule *rule) { return true; } static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack) { return 0; } static inline unsigned int fib4_rules_seq_read(const struct net *net) { return 0; } static inline bool fib4_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, struct flowi4 *fl4, struct flow_keys *flkeys) { return false; } #else /* CONFIG_IP_MULTIPLE_TABLES */ int __net_init fib4_rules_init(struct net *net); void __net_exit fib4_rules_exit(struct net *net); struct fib_table *fib_new_table(struct net *net, u32 id); struct fib_table *fib_get_table(struct net *net, u32 id); int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags); static inline int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags) { struct fib_table *tb; int err = -ENETUNREACH; flags |= FIB_LOOKUP_NOREF; if (net->ipv4.fib_has_custom_rules) return __fib_lookup(net, flp, res, flags); rcu_read_lock(); res->tclassid = 0; tb = rcu_dereference_rtnl(net->ipv4.fib_main); if (tb) err = fib_table_lookup(tb, flp, res, flags); if (!err) goto out; tb = rcu_dereference_rtnl(net->ipv4.fib_default); if (tb) err = fib_table_lookup(tb, flp, res, flags); out: if (err == -EAGAIN) err = -ENETUNREACH; rcu_read_unlock(); return err; } static inline bool fib4_has_custom_rules(const struct net *net) { return net->ipv4.fib_has_custom_rules; } bool fib4_rule_default(const struct fib_rule *rule); int fib4_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); unsigned int fib4_rules_seq_read(const struct net *net); static inline bool fib4_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, struct flowi4 *fl4, struct flow_keys *flkeys) { unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; if (!net->ipv4.fib_rules_require_fldissect) return false; memset(flkeys, 0, sizeof(*flkeys)); __skb_flow_dissect(net, skb, &flow_keys_dissector, flkeys, NULL, 0, 0, 0, flag); fl4->fl4_sport = flkeys->ports.src; 
fl4->fl4_dport = flkeys->ports.dst; fl4->flowi4_proto = flkeys->basic.ip_proto; return true; } #endif /* CONFIG_IP_MULTIPLE_TABLES */ static inline bool fib_dscp_masked_match(dscp_t dscp, const struct flowi4 *fl4) { return dscp == inet_dsfield_to_dscp(RT_TOS(fl4->flowi4_tos)); } /* Exported by fib_frontend.c */ extern const struct nla_policy rtm_ipv4_policy[]; void ip_fib_init(void); int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla, struct netlink_ext_ack *extack); __be32 fib_compute_spec_dst(struct sk_buff *skb); bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); static inline enum skb_drop_reason fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst, dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag) { int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev, itag); if (err < 0) return -err; return SKB_NOT_DROPPED_YET; } #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { return atomic_read(&net->ipv4.fib_num_tclassid_users); } #else static inline int fib_num_tclassid_users(struct net *net) { return 0; } #endif int fib_unmerge(struct net *net); static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, const struct net_device *dev) { if (nhc->nhc_dev == dev || l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) return true; return false; } /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); int fib_sync_down_addr(struct net_device *dev, __be32 local); int fib_sync_up(struct net_device *dev, unsigned char nh_flags); void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig); /* Fields used for sysctl_fib_multipath_hash_fields. * Common to IPv4 and IPv6. * * Add new fields at the end. This is user API. 
*/ #define FIB_MULTIPATH_HASH_FIELD_SRC_IP BIT(0) #define FIB_MULTIPATH_HASH_FIELD_DST_IP BIT(1) #define FIB_MULTIPATH_HASH_FIELD_IP_PROTO BIT(2) #define FIB_MULTIPATH_HASH_FIELD_FLOWLABEL BIT(3) #define FIB_MULTIPATH_HASH_FIELD_SRC_PORT BIT(4) #define FIB_MULTIPATH_HASH_FIELD_DST_PORT BIT(5) #define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP BIT(6) #define FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP BIT(7) #define FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO BIT(8) #define FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL BIT(9) #define FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT BIT(10) #define FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT BIT(11) #define FIB_MULTIPATH_HASH_FIELD_OUTER_MASK \ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \ FIB_MULTIPATH_HASH_FIELD_DST_IP | \ FIB_MULTIPATH_HASH_FIELD_IP_PROTO | \ FIB_MULTIPATH_HASH_FIELD_FLOWLABEL | \ FIB_MULTIPATH_HASH_FIELD_SRC_PORT | \ FIB_MULTIPATH_HASH_FIELD_DST_PORT) #define FIB_MULTIPATH_HASH_FIELD_INNER_MASK \ (FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP | \ FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP | \ FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO | \ FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL | \ FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT | \ FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) #define FIB_MULTIPATH_HASH_FIELD_ALL_MASK \ (FIB_MULTIPATH_HASH_FIELD_OUTER_MASK | \ FIB_MULTIPATH_HASH_FIELD_INNER_MASK) #define FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK \ (FIB_MULTIPATH_HASH_FIELD_SRC_IP | \ FIB_MULTIPATH_HASH_FIELD_DST_IP | \ FIB_MULTIPATH_HASH_FIELD_IP_PROTO) #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, const struct sk_buff *skb, struct flow_keys *flkeys); static void fib_multipath_hash_construct_key(siphash_key_t *key, u32 mp_seed) { u64 mp_seed_64 = mp_seed; key->key[0] = (mp_seed_64 << 32) | mp_seed_64; key->key[1] = key->key[0]; } static inline u32 fib_multipath_hash_from_keys(const struct net *net, struct flow_keys *keys) { siphash_aligned_key_t hash_key; u32 mp_seed; mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed; fib_multipath_hash_construct_key(&hash_key, mp_seed); return flow_hash_from_keys_seed(keys, &hash_key); } #else static inline u32 fib_multipath_hash_from_keys(const struct net *net, struct flow_keys *keys) { return flow_hash_from_keys(keys); } #endif int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, struct netlink_ext_ack *extack); void fib_select_multipath(struct fib_result *res, int hash); void fib_select_path(struct net *net, struct fib_result *res, struct flowi4 *fl4, const struct sk_buff *skb); int fib_nh_init(struct net *net, struct fib_nh *fib_nh, struct fib_config *cfg, int nh_weight, struct netlink_ext_ack *extack); void fib_nh_release(struct net *net, struct fib_nh *fib_nh); int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc, struct nlattr *fc_encap, u16 fc_encap_type, void *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib_nh_common_release(struct fib_nh_common *nhc); /* Exported by fib_trie.c */ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri); void fib_trie_init(void); struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, const struct flowi4 *flp); static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) { #ifdef CONFIG_IP_ROUTE_CLASSID struct fib_nh_common *nhc = res->nhc; #ifdef CONFIG_IP_MULTIPLE_TABLES u32 rtag; #endif if (nhc->nhc_family == AF_INET) { struct fib_nh *nh; nh = container_of(nhc, struct 
fib_nh, nh_common); *itag = nh->nh_tclassid << 16; } else { *itag = 0; } #ifdef CONFIG_IP_MULTIPLE_TABLES rtag = res->tclassid; if (*itag == 0) *itag = (rtag<<16); *itag |= (rtag>>16); #endif #endif } void fib_flush(struct net *net); void free_fib_info(struct fib_info *fi); static inline void fib_info_hold(struct fib_info *fi) { refcount_inc(&fi->fib_clntref); } static inline void fib_info_put(struct fib_info *fi) { if (refcount_dec_and_test(&fi->fib_clntref)) free_fib_info(fi); } #ifdef CONFIG_PROC_FS int __net_init fib_proc_init(struct net *net); void __net_exit fib_proc_exit(struct net *net); #else static inline int fib_proc_init(struct net *net) { return 0; } static inline void fib_proc_exit(struct net *net) { } #endif u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr); int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, struct fib_dump_filter *filter, struct netlink_callback *cb); int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh, u8 rt_family, unsigned char *flags, bool skip_oif); int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh, int nh_weight, u8 rt_family, u32 nh_tclassid); #endif /* _NET_FIB_H */ |
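/*
 * Hedged usage sketch (not part of the original header): a minimal caller of
 * fib_lookup() as declared above, assuming an RCU read-side section is enough
 * to cover the lookup and any inspection of the result. The function name and
 * the flowi4 fields chosen are illustrative.
 */
#if 0
static int fib_lookup_sketch(struct net *net, __be32 daddr, __be32 saddr)
{
	struct flowi4 fl4 = {
		.daddr = daddr,
		.saddr = saddr,
	};
	struct fib_result res;
	int err;

	rcu_read_lock();
	err = fib_lookup(net, &fl4, &res, 0);
	/* res (e.g. res.type, FIB_RES_DEV(res)) is only safe to use inside
	 * this RCU section under FIB_LOOKUP_NOREF semantics.
	 */
	rcu_read_unlock();

	return err;
}
#endif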
26 30 30 6 21 30 19 2 8 9 25 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IPV6_FRAG_H #define _IPV6_FRAG_H #include <linux/icmpv6.h> #include <linux/kernel.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <net/inet_frag.h> enum ip6_defrag_users { IP6_DEFRAG_LOCAL_DELIVER, IP6_DEFRAG_CONNTRACK_IN, __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX, IP6_DEFRAG_CONNTRACK_OUT, __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX, IP6_DEFRAG_CONNTRACK_BRIDGE_IN, __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, }; /* * Equivalent of ipv4 struct ip */ struct frag_queue { struct inet_frag_queue q; int iif; __u16 nhoffset; u8 ecn; }; #if IS_ENABLED(CONFIG_IPV6) static inline void ip6frag_init(struct inet_frag_queue *q, const void *a) { struct frag_queue *fq = container_of(q, struct frag_queue, q); const struct frag_v6_compare_key *key = a; q->key.v6 = *key; fq->ecn = 0; } static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed) { return jhash2(data, sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); } static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed) { const struct inet_frag_queue *fq = data; return jhash2((const u32 *)&fq->key.v6, sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); } static inline int ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) { const struct frag_v6_compare_key *key = arg->key; const struct inet_frag_queue *fq = ptr; return !!memcmp(&fq->key, key, sizeof(*key)); } static inline void ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) { struct net_device *dev = NULL; struct sk_buff *head; int refs = 1; rcu_read_lock(); /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */ if (READ_ONCE(fq->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&fq->q.lock); if (fq->q.flags & INET_FRAG_COMPLETE) goto out; fq->q.flags |= INET_FRAG_DROP; inet_frag_kill(&fq->q, &refs); dev = dev_get_by_index_rcu(net, fq->iif); if (!dev) goto out; __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); /* Don't send error if the first segment did not arrive. */ if (!(fq->q.flags & INET_FRAG_FIRST_IN)) goto out; /* sk_buff::dev and sk_buff::rbnode are unionized. So we * pull the head out of the tree in order to be able to * deal with head->dev. */ head = inet_frag_pull_head(&fq->q); if (!head) goto out; head->dev = dev; spin_unlock(&fq->q.lock); icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT); goto out_rcu_unlock; out: spin_unlock(&fq->q.lock); out_rcu_unlock: rcu_read_unlock(); inet_frag_putn(&fq->q, refs); } /* Check if the upper layer header is truncated in the first fragment. 
*/ static inline bool ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp) { u8 nexthdr = *nexthdrp; __be16 frag_off; int offset; offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off); if (offset < 0 || (frag_off & htons(IP6_OFFSET))) return false; switch (nexthdr) { case NEXTHDR_TCP: offset += sizeof(struct tcphdr); break; case NEXTHDR_UDP: offset += sizeof(struct udphdr); break; case NEXTHDR_ICMP: offset += sizeof(struct icmp6hdr); break; default: offset += 1; } if (offset > skb->len) return true; return false; } #endif #endif |
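/*
 * Hedged sketch (not in the original header): the ip6frag_*_hashfn/cmpfn
 * helpers above are shaped to plug directly into rhashtable parameters for a
 * fragment-queue table. The wiring below is an assumed example of such a
 * parameter block, mirroring how inet_frag users typically set this up.
 */
#if 0
static const struct rhashtable_params ip6frag_rhash_params_sketch = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};
#endif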
3 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PFN_T_H_ #define _LINUX_PFN_T_H_ #include <linux/mm.h> /* * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry * PFN_SG_LAST - pfn references a page and is the last scatterlist entry * PFN_DEV - pfn is not covered by system memmap by default * PFN_MAP - pfn has a dynamic page mapping established by a device driver * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not * get_user_pages */ #define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) #define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) #define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5)) #define PFN_FLAGS_TRACE \ { PFN_SPECIAL, "SPECIAL" }, \ { PFN_SG_CHAIN, "SG_CHAIN" }, \ { PFN_SG_LAST, "SG_LAST" }, \ { PFN_DEV, "DEV" }, \ { PFN_MAP, "MAP" } static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) { pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; return pfn_t; } /* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */ static inline pfn_t pfn_to_pfn_t(unsigned long pfn) { return __pfn_to_pfn_t(pfn, 0); } static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags) { return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); } static inline bool pfn_t_has_page(pfn_t pfn) { return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0; } static inline unsigned long pfn_t_to_pfn(pfn_t pfn) { return pfn.val & ~PFN_FLAGS_MASK; } static inline struct page *pfn_t_to_page(pfn_t pfn) { if (pfn_t_has_page(pfn)) return pfn_to_page(pfn_t_to_pfn(pfn)); return NULL; } static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) { return PFN_PHYS(pfn_t_to_pfn(pfn)); } static inline pfn_t page_to_pfn_t(struct page *page) { return pfn_to_pfn_t(page_to_pfn(page)); } static inline int pfn_t_valid(pfn_t pfn) { return pfn_valid(pfn_t_to_pfn(pfn)); } #ifdef CONFIG_MMU static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot) { return pfn_pte(pfn_t_to_pfn(pfn), pgprot); } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) { return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) { return pfn_pud(pfn_t_to_pfn(pfn), pgprot); } #endif #endif #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP static inline bool pfn_t_devmap(pfn_t pfn) { const u64 flags = PFN_DEV|PFN_MAP; return (pfn.val & flags) == flags; } #else static inline bool pfn_t_devmap(pfn_t pfn) { return false; } pte_t pte_mkdevmap(pte_t pte); pmd_t pmd_mkdevmap(pmd_t pmd); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pud_mkdevmap(pud_t pud); #endif #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static inline bool pfn_t_special(pfn_t pfn) { return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL; } #else static inline bool 
pfn_t_special(pfn_t pfn) { return false; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #endif /* _LINUX_PFN_T_H_ */ |
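/*
 * Hedged sketch (not in the original header): constructing a device-backed
 * pfn_t from a physical address using only the helpers defined above. The
 * function name is illustrative; the flag combination is the one that
 * pfn_t_devmap() tests for.
 */
#if 0
static inline pfn_t pfn_t_devmap_sketch(phys_addr_t phys)
{
	/* phys_to_pfn_t() shifts by PAGE_SHIFT and ORs the flag bits in. */
	pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);

	/* pfn_t_to_pfn(pfn) recovers phys >> PAGE_SHIFT, and
	 * pfn_t_devmap(pfn) is true when CONFIG_ARCH_HAS_PTE_DEVMAP is set.
	 */
	return pfn;
}
#endif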
// SPDX-License-Identifier: GPL-2.0-or-later /* * USB SD Host Controller (USHC) controller driver. * * Copyright (C) 2010 Cambridge Silicon Radio Ltd. * * Notes: * - Only version 2 devices are supported. * - Version 2 devices only support SDIO cards/devices (R2 response is * unsupported).
* * References: * [USHC] USB SD Host Controller specification (CS-118793-SP) */ #include <linux/module.h> #include <linux/usb.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/mmc/host.h> enum ushc_request { USHC_GET_CAPS = 0x00, USHC_HOST_CTRL = 0x01, USHC_PWR_CTRL = 0x02, USHC_CLK_FREQ = 0x03, USHC_EXEC_CMD = 0x04, USHC_READ_RESP = 0x05, USHC_RESET = 0x06, }; enum ushc_request_type { USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, }; #define USHC_GET_CAPS_VERSION_MASK 0xff #define USHC_GET_CAPS_3V3 (1 << 8) #define USHC_GET_CAPS_3V0 (1 << 9) #define USHC_GET_CAPS_1V8 (1 << 10) #define USHC_GET_CAPS_HIGH_SPD (1 << 16) #define USHC_HOST_CTRL_4BIT (1 << 1) #define USHC_HOST_CTRL_HIGH_SPD (1 << 0) #define USHC_PWR_CTRL_OFF 0x00 #define USHC_PWR_CTRL_3V3 0x01 #define USHC_PWR_CTRL_3V0 0x02 #define USHC_PWR_CTRL_1V8 0x03 #define USHC_READ_RESP_BUSY (1 << 4) #define USHC_READ_RESP_ERR_TIMEOUT (1 << 3) #define USHC_READ_RESP_ERR_CRC (1 << 2) #define USHC_READ_RESP_ERR_DAT (1 << 1) #define USHC_READ_RESP_ERR_CMD (1 << 0) #define USHC_READ_RESP_ERR_MASK 0x0f struct ushc_cbw { __u8 signature; __u8 cmd_idx; __le16 block_size; __le32 arg; } __attribute__((packed)); #define USHC_CBW_SIGNATURE 'C' struct ushc_csw { __u8 signature; __u8 status; __le32 response; } __attribute__((packed)); #define USHC_CSW_SIGNATURE 'S' struct ushc_int_data { u8 status; u8 reserved[3]; }; #define USHC_INT_STATUS_SDIO_INT (1 << 1) #define USHC_INT_STATUS_CARD_PRESENT (1 << 0) struct ushc_data { struct usb_device *usb_dev; struct mmc_host *mmc; struct urb *int_urb; struct ushc_int_data *int_data; struct urb *cbw_urb; struct ushc_cbw *cbw; struct urb *data_urb; struct urb *csw_urb; struct ushc_csw *csw; spinlock_t lock; struct mmc_request *current_req; u32 caps; u16 host_ctrl; unsigned long flags; u8 last_status; int clock_freq; }; #define DISCONNECTED 0 #define INT_EN 1 #define IGNORE_NEXT_INT 2 static void data_callback(struct urb *urb); static int ushc_hw_reset(struct ushc_data *ushc) { return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), USHC_RESET, USHC_RESET_TYPE, 0, 0, NULL, 0, 100); } static int ushc_hw_get_caps(struct ushc_data *ushc) { int ret; int version; ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0), USHC_GET_CAPS, USHC_GET_CAPS_TYPE, 0, 0, &ushc->caps, sizeof(ushc->caps), 100); if (ret < 0) return ret; ushc->caps = le32_to_cpu(ushc->caps); version = ushc->caps & USHC_GET_CAPS_VERSION_MASK; if (version != 0x02) { dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version); return -EINVAL; } return 0; } static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val) { u16 host_ctrl; int ret; host_ctrl = (ushc->host_ctrl & ~mask) | val; ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE, host_ctrl, 0, NULL, 0, 100); if (ret < 0) return ret; ushc->host_ctrl = host_ctrl; return 0; } static void int_callback(struct urb *urb) { struct ushc_data *ushc = urb->context; u8 status, last_status; if 
(urb->status < 0) return; status = ushc->int_data->status; last_status = ushc->last_status; ushc->last_status = status; /* * Ignore the card interrupt status on interrupt transfers that * were submitted while card interrupts where disabled. * * This avoid occasional spurious interrupts when enabling * interrupts immediately after clearing the source on the card. */ if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags) && test_bit(INT_EN, &ushc->flags) && status & USHC_INT_STATUS_SDIO_INT) { mmc_signal_sdio_irq(ushc->mmc); } if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT) mmc_detect_change(ushc->mmc, msecs_to_jiffies(100)); if (!test_bit(INT_EN, &ushc->flags)) set_bit(IGNORE_NEXT_INT, &ushc->flags); usb_submit_urb(ushc->int_urb, GFP_ATOMIC); } static void cbw_callback(struct urb *urb) { struct ushc_data *ushc = urb->context; if (urb->status != 0) { usb_unlink_urb(ushc->data_urb); usb_unlink_urb(ushc->csw_urb); } } static void data_callback(struct urb *urb) { struct ushc_data *ushc = urb->context; if (urb->status != 0) usb_unlink_urb(ushc->csw_urb); } static void csw_callback(struct urb *urb) { struct ushc_data *ushc = urb->context; struct mmc_request *req = ushc->current_req; int status; status = ushc->csw->status; if (urb->status != 0) { req->cmd->error = urb->status; } else if (status & USHC_READ_RESP_ERR_CMD) { if (status & USHC_READ_RESP_ERR_CRC) req->cmd->error = -EIO; else req->cmd->error = -ETIMEDOUT; } if (req->data) { if (status & USHC_READ_RESP_ERR_DAT) { if (status & USHC_READ_RESP_ERR_CRC) req->data->error = -EIO; else req->data->error = -ETIMEDOUT; req->data->bytes_xfered = 0; } else { req->data->bytes_xfered = req->data->blksz * req->data->blocks; } } req->cmd->resp[0] = le32_to_cpu(ushc->csw->response); mmc_request_done(ushc->mmc, req); } static void ushc_request(struct mmc_host *mmc, struct mmc_request *req) { struct ushc_data *ushc = mmc_priv(mmc); int ret; unsigned long flags; spin_lock_irqsave(&ushc->lock, flags); if (test_bit(DISCONNECTED, &ushc->flags)) { ret = -ENODEV; goto out; } /* Version 2 firmware doesn't support the R2 response format. */ if (req->cmd->flags & MMC_RSP_136) { ret = -EINVAL; goto out; } /* The Astoria's data FIFOs don't work with clock speeds < 5MHz so limit commands with data to 6MHz or more. */ if (req->data && ushc->clock_freq < 6000000) { ret = -EINVAL; goto out; } ushc->current_req = req; /* Start cmd with CBW. */ ushc->cbw->cmd_idx = cpu_to_le16(req->cmd->opcode); if (req->data) ushc->cbw->block_size = cpu_to_le16(req->data->blksz); else ushc->cbw->block_size = 0; ushc->cbw->arg = cpu_to_le32(req->cmd->arg); ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC); if (ret < 0) goto out; /* Submit data (if any). */ if (req->data) { struct mmc_data *data = req->data; int pipe; if (data->flags & MMC_DATA_READ) pipe = usb_rcvbulkpipe(ushc->usb_dev, 6); else pipe = usb_sndbulkpipe(ushc->usb_dev, 2); usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe, NULL, data->sg->length, data_callback, ushc); ushc->data_urb->num_sgs = 1; ushc->data_urb->sg = data->sg; ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC); if (ret < 0) goto out; } /* Submit CSW. 
*/ ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC); out: spin_unlock_irqrestore(&ushc->lock, flags); if (ret < 0) { usb_unlink_urb(ushc->cbw_urb); usb_unlink_urb(ushc->data_urb); req->cmd->error = ret; mmc_request_done(mmc, req); } } static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode) { u16 voltage; switch (power_mode) { case MMC_POWER_OFF: voltage = USHC_PWR_CTRL_OFF; break; case MMC_POWER_UP: case MMC_POWER_ON: voltage = USHC_PWR_CTRL_3V3; break; default: return -EINVAL; } return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE, voltage, 0, NULL, 0, 100); } static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width) { return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT, bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0); } static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs) { int ret; /* Hardware can't detect interrupts while the clock is off. */ if (clk == 0) clk = 400000; ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD, enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0); if (ret < 0) return ret; ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0), USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE, clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100); if (ret < 0) return ret; ushc->clock_freq = clk; return 0; } static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct ushc_data *ushc = mmc_priv(mmc); ushc_set_power(ushc, ios->power_mode); ushc_set_bus_width(ushc, 1 << ios->bus_width); ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS); } static int ushc_get_cd(struct mmc_host *mmc) { struct ushc_data *ushc = mmc_priv(mmc); return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT); } static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct ushc_data *ushc = mmc_priv(mmc); if (enable) set_bit(INT_EN, &ushc->flags); else clear_bit(INT_EN, &ushc->flags); } static void ushc_clean_up(struct ushc_data *ushc) { usb_free_urb(ushc->int_urb); usb_free_urb(ushc->csw_urb); usb_free_urb(ushc->data_urb); usb_free_urb(ushc->cbw_urb); kfree(ushc->int_data); kfree(ushc->cbw); kfree(ushc->csw); mmc_free_host(ushc->mmc); } static const struct mmc_host_ops ushc_ops = { .request = ushc_request, .set_ios = ushc_set_ios, .get_cd = ushc_get_cd, .enable_sdio_irq = ushc_enable_sdio_irq, }; static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct mmc_host *mmc; struct ushc_data *ushc; int ret; if (intf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) return -ENOMEM; ushc = mmc_priv(mmc); usb_set_intfdata(intf, ushc); ushc->usb_dev = usb_dev; ushc->mmc = mmc; spin_lock_init(&ushc->lock); ret = ushc_hw_reset(ushc); if (ret < 0) goto err; /* Read capabilities. */ ret = ushc_hw_get_caps(ushc); if (ret < 0) goto err; mmc->ops = &ushc_ops; mmc->f_min = 400000; mmc->f_max = 50000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? 
MMC_CAP_SD_HIGHSPEED : 0; mmc->max_seg_size = 512*511; mmc->max_segs = 1; mmc->max_req_size = 512*511; mmc->max_blk_size = 512; mmc->max_blk_count = 511; ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->int_urb == NULL) { ret = -ENOMEM; goto err; } ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); if (ushc->int_data == NULL) { ret = -ENOMEM; goto err; } usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, usb_rcvintpipe(usb_dev, intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), ushc->int_data, sizeof(struct ushc_int_data), int_callback, ushc, intf->cur_altsetting->endpoint[0].desc.bInterval); ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->cbw_urb == NULL) { ret = -ENOMEM; goto err; } ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); if (ushc->cbw == NULL) { ret = -ENOMEM; goto err; } ushc->cbw->signature = USHC_CBW_SIGNATURE; usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), ushc->cbw, sizeof(struct ushc_cbw), cbw_callback, ushc); ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->data_urb == NULL) { ret = -ENOMEM; goto err; } ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->csw_urb == NULL) { ret = -ENOMEM; goto err; } ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL); if (ushc->csw == NULL) { ret = -ENOMEM; goto err; } usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), ushc->csw, sizeof(struct ushc_csw), csw_callback, ushc); ret = mmc_add_host(ushc->mmc); if (ret) goto err; ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL); if (ret < 0) { mmc_remove_host(ushc->mmc); goto err; } return 0; err: ushc_clean_up(ushc); return ret; } static void ushc_disconnect(struct usb_interface *intf) { struct ushc_data *ushc = usb_get_intfdata(intf); spin_lock_irq(&ushc->lock); set_bit(DISCONNECTED, &ushc->flags); spin_unlock_irq(&ushc->lock); usb_kill_urb(ushc->int_urb); usb_kill_urb(ushc->cbw_urb); usb_kill_urb(ushc->data_urb); usb_kill_urb(ushc->csw_urb); mmc_remove_host(ushc->mmc); ushc_clean_up(ushc); } static struct usb_device_id ushc_id_table[] = { /* CSR USB SD Host Controller */ { USB_DEVICE(0x0a12, 0x5d10) }, { }, }; MODULE_DEVICE_TABLE(usb, ushc_id_table); static struct usb_driver ushc_driver = { .name = "ushc", .id_table = ushc_id_table, .probe = ushc_probe, .disconnect = ushc_disconnect, }; module_usb_driver(ushc_driver); MODULE_DESCRIPTION("USB SD Host Controller driver"); MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>"); MODULE_LICENSE("GPL"); |
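/*
 * Added commentary (not in the original driver): ushc_request() drives each
 * MMC command as up to three URBs submitted back to back under ushc->lock: a
 * bulk-out CBW describing the command, an optional bulk data URB built on the
 * request's scatterlist, and a bulk-in CSW whose completion (csw_callback)
 * reports status and calls mmc_request_done(). cbw_callback() and
 * data_callback() unlink the later URBs on error so a failed stage does not
 * leave the chain dangling.
 */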
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/cls_flow.c Generic flow classifier * * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/pkt_cls.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/module.h> #include <net/inet_sock.h> #include <net/pkt_cls.h> #include <net/ip.h> #include 
<net/route.h> #include <net/flow_dissector.h> #include <net/tc_wrapper.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <net/netfilter/nf_conntrack.h> #endif struct flow_head { struct list_head filters; struct rcu_head rcu; }; struct flow_filter { struct list_head list; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_proto *tp; struct timer_list perturb_timer; u32 perturb_period; u32 handle; u32 nkeys; u32 keymask; u32 mode; u32 mask; u32 xor; u32 rshift; u32 addend; u32 divisor; u32 baseclass; u32 hashrnd; struct rcu_work rwork; }; static inline u32 addr_fold(void *addr) { unsigned long a = (unsigned long)addr; return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); } static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) { __be32 src = flow_get_u32_src(flow); if (src) return ntohl(src); return addr_fold(skb->sk); } static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) { __be32 dst = flow_get_u32_dst(flow); if (dst) return ntohl(dst); return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow) { return flow->basic.ip_proto; } static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) { if (flow->ports.ports) return ntohs(flow->ports.src); return addr_fold(skb->sk); } static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) { if (flow->ports.ports) return ntohs(flow->ports.dst); return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_iif(const struct sk_buff *skb) { return skb->skb_iif; } static u32 flow_get_priority(const struct sk_buff *skb) { return skb->priority; } static u32 flow_get_mark(const struct sk_buff *skb) { return skb->mark; } static u32 flow_get_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) return addr_fold(skb_nfct(skb)); #else return 0; #endif } #if IS_ENABLED(CONFIG_NF_CONNTRACK) #define CTTUPLE(skb, member) \ ({ \ enum ip_conntrack_info ctinfo; \ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \ if (ct == NULL) \ goto fallback; \ ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \ }) #else #define CTTUPLE(skb, member) \ ({ \ goto fallback; \ 0; \ }) #endif static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) { switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, src.u3.ip)); case htons(ETH_P_IPV6): return ntohl(CTTUPLE(skb, src.u3.ip6[3])); } fallback: return flow_get_src(skb, flow); } static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) { switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, dst.u3.ip)); case htons(ETH_P_IPV6): return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); } fallback: return flow_get_dst(skb, flow); } static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) { return ntohs(CTTUPLE(skb, src.u.all)); fallback: return flow_get_proto_src(skb, flow); } static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) { return ntohs(CTTUPLE(skb, dst.u.all)); fallback: return flow_get_proto_dst(skb, flow); } static u32 flow_get_rtclassid(const struct sk_buff *skb) { #ifdef CONFIG_IP_ROUTE_CLASSID if (skb_dst(skb)) return skb_dst(skb)->tclassid; #endif return 0; } static u32 flow_get_skuid(const struct sk_buff *skb) { struct sock *sk = skb_to_full_sk(skb); if (sk && sk->sk_socket && 
sk->sk_socket->file) { kuid_t skuid = sk->sk_socket->file->f_cred->fsuid; return from_kuid(&init_user_ns, skuid); } return 0; } static u32 flow_get_skgid(const struct sk_buff *skb) { struct sock *sk = skb_to_full_sk(skb); if (sk && sk->sk_socket && sk->sk_socket->file) { kgid_t skgid = sk->sk_socket->file->f_cred->fsgid; return from_kgid(&init_user_ns, skgid); } return 0; } static u32 flow_get_vlan_tag(const struct sk_buff *skb) { u16 tag; if (vlan_get_tag(skb, &tag) < 0) return 0; return tag & VLAN_VID_MASK; } static u32 flow_get_rxhash(struct sk_buff *skb) { return skb_get_hash(skb); } static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) { switch (key) { case FLOW_KEY_SRC: return flow_get_src(skb, flow); case FLOW_KEY_DST: return flow_get_dst(skb, flow); case FLOW_KEY_PROTO: return flow_get_proto(skb, flow); case FLOW_KEY_PROTO_SRC: return flow_get_proto_src(skb, flow); case FLOW_KEY_PROTO_DST: return flow_get_proto_dst(skb, flow); case FLOW_KEY_IIF: return flow_get_iif(skb); case FLOW_KEY_PRIORITY: return flow_get_priority(skb); case FLOW_KEY_MARK: return flow_get_mark(skb); case FLOW_KEY_NFCT: return flow_get_nfct(skb); case FLOW_KEY_NFCT_SRC: return flow_get_nfct_src(skb, flow); case FLOW_KEY_NFCT_DST: return flow_get_nfct_dst(skb, flow); case FLOW_KEY_NFCT_PROTO_SRC: return flow_get_nfct_proto_src(skb, flow); case FLOW_KEY_NFCT_PROTO_DST: return flow_get_nfct_proto_dst(skb, flow); case FLOW_KEY_RTCLASSID: return flow_get_rtclassid(skb); case FLOW_KEY_SKUID: return flow_get_skuid(skb); case FLOW_KEY_SKGID: return flow_get_skgid(skb); case FLOW_KEY_VLAN_TAG: return flow_get_vlan_tag(skb); case FLOW_KEY_RXHASH: return flow_get_rxhash(skb); default: WARN_ON(1); return 0; } } #define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \ (1 << FLOW_KEY_DST) | \ (1 << FLOW_KEY_PROTO) | \ (1 << FLOW_KEY_PROTO_SRC) | \ (1 << FLOW_KEY_PROTO_DST) | \ (1 << FLOW_KEY_NFCT_SRC) | \ (1 << FLOW_KEY_NFCT_DST) | \ (1 << FLOW_KEY_NFCT_PROTO_SRC) | \ (1 << FLOW_KEY_NFCT_PROTO_DST)) TC_INDIRECT_SCOPE int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct flow_head *head = rcu_dereference_bh(tp->root); struct flow_filter *f; u32 keymask; u32 classid; unsigned int n, key; int r; list_for_each_entry_rcu(f, &head->filters, list) { u32 keys[FLOW_KEY_MAX + 1]; struct flow_keys flow_keys; if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; keymask = f->keymask; if (keymask & FLOW_KEYS_NEEDED) skb_flow_dissect_flow_keys(skb, &flow_keys, 0); for (n = 0; n < f->nkeys; n++) { key = ffs(keymask) - 1; keymask &= ~(1 << key); keys[n] = flow_key_get(skb, key, &flow_keys); } if (f->mode == FLOW_MODE_HASH) classid = jhash2(keys, f->nkeys, f->hashrnd); else { classid = keys[0]; classid = (classid & f->mask) ^ f->xor; classid = (classid >> f->rshift) + f->addend; } if (f->divisor) classid %= f->divisor; res->class = 0; res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid); r = tcf_exts_exec(skb, &f->exts, res); if (r < 0) continue; return r; } return -1; } static void flow_perturbation(struct timer_list *t) { struct flow_filter *f = from_timer(f, t, perturb_timer); get_random_bytes(&f->hashrnd, 4); if (f->perturb_period) mod_timer(&f->perturb_timer, jiffies + f->perturb_period); } static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { [TCA_FLOW_KEYS] = { .type = NLA_U32 }, [TCA_FLOW_MODE] = { .type = NLA_U32 }, [TCA_FLOW_BASECLASS] = { .type = NLA_U32 }, [TCA_FLOW_RSHIFT] = NLA_POLICY_MAX(NLA_U32, 31 /* BITS_PER_U32 - 1 */), 
[TCA_FLOW_ADDEND] = { .type = NLA_U32 }, [TCA_FLOW_MASK] = { .type = NLA_U32 }, [TCA_FLOW_XOR] = { .type = NLA_U32 }, [TCA_FLOW_DIVISOR] = { .type = NLA_U32 }, [TCA_FLOW_ACT] = { .type = NLA_NESTED }, [TCA_FLOW_POLICE] = { .type = NLA_NESTED }, [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED }, [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, }; static void __flow_destroy_filter(struct flow_filter *f) { timer_shutdown_sync(&f->perturb_timer); tcf_exts_destroy(&f->exts); tcf_em_tree_destroy(&f->ematches); tcf_exts_put_net(&f->exts); kfree(f); } static void flow_destroy_filter_work(struct work_struct *work) { struct flow_filter *f = container_of(to_rcu_work(work), struct flow_filter, rwork); rtnl_lock(); __flow_destroy_filter(f); rtnl_unlock(); } static int flow_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, void **arg, u32 flags, struct netlink_ext_ack *extack) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *fold, *fnew; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; unsigned int nkeys = 0; unsigned int perturb_period = 0; u32 baseclass = 0; u32 keymask = 0; u32 mode; int err; if (opt == NULL) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy, NULL); if (err < 0) return err; if (tb[TCA_FLOW_BASECLASS]) { baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]); if (TC_H_MIN(baseclass) == 0) return -EINVAL; } if (tb[TCA_FLOW_KEYS]) { keymask = nla_get_u32(tb[TCA_FLOW_KEYS]); nkeys = hweight32(keymask); if (nkeys == 0) return -EINVAL; if (fls(keymask) - 1 > FLOW_KEY_MAX) return -EOPNOTSUPP; if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) && sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns) return -EOPNOTSUPP; } fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); if (!fnew) return -ENOBUFS; err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches); if (err < 0) goto err1; err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE); if (err < 0) goto err2; err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags, extack); if (err < 0) goto err2; fold = *arg; if (fold) { err = -EINVAL; if (fold->handle != handle && handle) goto err2; /* Copy fold into fnew */ fnew->tp = fold->tp; fnew->handle = fold->handle; fnew->nkeys = fold->nkeys; fnew->keymask = fold->keymask; fnew->mode = fold->mode; fnew->mask = fold->mask; fnew->xor = fold->xor; fnew->rshift = fold->rshift; fnew->addend = fold->addend; fnew->divisor = fold->divisor; fnew->baseclass = fold->baseclass; fnew->hashrnd = fold->hashrnd; mode = fold->mode; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; if (mode == FLOW_MODE_HASH) perturb_period = fold->perturb_period; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } } else { err = -EINVAL; if (!handle) goto err2; if (!tb[TCA_FLOW_KEYS]) goto err2; mode = FLOW_MODE_MAP; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } if (TC_H_MAJ(baseclass) == 0) { struct Qdisc *q = tcf_block_q(tp->chain->block); baseclass = TC_H_MAKE(q->handle, baseclass); } if (TC_H_MIN(baseclass) == 0) baseclass = TC_H_MAKE(baseclass, 1); fnew->handle = handle; fnew->mask = ~0U; fnew->tp = tp; 
get_random_bytes(&fnew->hashrnd, 4); } timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE); tcf_block_netif_keep_dst(tp->chain->block); if (tb[TCA_FLOW_KEYS]) { fnew->keymask = keymask; fnew->nkeys = nkeys; } fnew->mode = mode; if (tb[TCA_FLOW_MASK]) fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]); if (tb[TCA_FLOW_XOR]) fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]); if (tb[TCA_FLOW_RSHIFT]) fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); if (tb[TCA_FLOW_ADDEND]) fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); if (tb[TCA_FLOW_DIVISOR]) fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); if (baseclass) fnew->baseclass = baseclass; fnew->perturb_period = perturb_period; if (perturb_period) mod_timer(&fnew->perturb_timer, jiffies + perturb_period); if (!*arg) list_add_tail_rcu(&fnew->list, &head->filters); else list_replace_rcu(&fold->list, &fnew->list); *arg = fnew; if (fold) { tcf_exts_get_net(&fold->exts); tcf_queue_work(&fold->rwork, flow_destroy_filter_work); } return 0; err2: tcf_exts_destroy(&fnew->exts); tcf_em_tree_destroy(&fnew->ematches); err1: kfree(fnew); return err; } static int flow_delete(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, struct netlink_ext_ack *extack) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f = arg; list_del_rcu(&f->list); tcf_exts_get_net(&f->exts); tcf_queue_work(&f->rwork, flow_destroy_filter_work); *last = list_empty(&head->filters); return 0; } static int flow_init(struct tcf_proto *tp) { struct flow_head *head; head = kzalloc(sizeof(*head), GFP_KERNEL); if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->filters); rcu_assign_pointer(tp->root, head); return 0; } static void flow_destroy(struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f, *next; list_for_each_entry_safe(f, next, &head->filters, list) { list_del_rcu(&f->list); if (tcf_exts_get_net(&f->exts)) tcf_queue_work(&f->rwork, flow_destroy_filter_work); else __flow_destroy_filter(f); } kfree_rcu(head, rcu); } static void *flow_get(struct tcf_proto *tp, u32 handle) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; list_for_each_entry(f, &head->filters, list) if (f->handle == handle) return f; return NULL; } static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) { struct flow_filter *f = fh; struct nlattr *nest; if (f == NULL) return skb->len; t->tcm_handle = f->handle; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) || nla_put_u32(skb, TCA_FLOW_MODE, f->mode)) goto nla_put_failure; if (f->mask != ~0 || f->xor != 0) { if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) || nla_put_u32(skb, TCA_FLOW_XOR, f->xor)) goto nla_put_failure; } if (f->rshift && nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift)) goto nla_put_failure; if (f->addend && nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend)) goto nla_put_failure; if (f->divisor && nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor)) goto nla_put_failure; if (f->baseclass && nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass)) goto nla_put_failure; if (f->perturb_period && nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ)) goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts) < 0) goto nla_put_failure; #ifdef CONFIG_NET_EMATCH if (f->ematches.hdr.nmatches && tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0) goto 
nla_put_failure; #endif nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &f->exts) < 0) goto nla_put_failure; return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; list_for_each_entry(f, &head->filters, list) { if (!tc_cls_stats_dump(tp, arg, f)) break; } } static struct tcf_proto_ops cls_flow_ops __read_mostly = { .kind = "flow", .classify = flow_classify, .init = flow_init, .destroy = flow_destroy, .change = flow_change, .delete = flow_delete, .get = flow_get, .dump = flow_dump, .walk = flow_walk, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_CLS("flow"); static int __init cls_flow_init(void) { return register_tcf_proto_ops(&cls_flow_ops); } static void __exit cls_flow_exit(void) { unregister_tcf_proto_ops(&cls_flow_ops); } module_init(cls_flow_init); module_exit(cls_flow_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("TC flow classifier"); |
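/*
 * Illustrative usage sketch (assumed iproute2 syntax, not part of the file
 * above; exact option names may differ between iproute2 versions). A
 * hash-mode configuration exercising the classifier might look like:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst divisor 1024 baseclass 1:1
 *
 * flow_classify() then computes classid = jhash2(keys, nkeys, hashrnd) and
 * reduces it modulo the divisor, so the packet is steered to
 * TC_H_MAKE(baseclass, baseclass + classid), i.e. minor class IDs 1..1024
 * under major 1: in this example.
 */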
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_FUTEX_H
#define _ASM_X86_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/smap.h>

#define unsafe_atomic_op1(insn, oval, uaddr, oparg, label)	\
do {								\
	int oldval = 0, ret;					\
	asm volatile("1:\t" insn "\n"				\
		     "2:\n"					\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %1) \
		     : "=r" (oldval), "=r" (ret), "+m" (*uaddr)	\
		     : "0" (oparg), "1" (0));			\
	if (ret)						\
		goto label;					\
	*oval = oldval;						\
} while(0)

#define unsafe_atomic_op2(insn, oval, uaddr, oparg, label)	\
do {								\
	int oldval = 0, ret, tem;				\
	asm volatile("1:\tmovl %2, %0\n"			\
		     "2:\tmovl\t%0, %3\n"			\
		     "\t" insn "\n"				\
		     "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
		     "\tjnz\t2b\n"				\
		     "4:\n"					\
		     _ASM_EXTABLE_TYPE_REG(1b, 4b, EX_TYPE_EFAULT_REG, %1) \
		     _ASM_EXTABLE_TYPE_REG(3b, 4b, EX_TYPE_EFAULT_REG, %1) \
		     : "=&a" (oldval), "=&r" (ret),		\
		       "+m" (*uaddr), "=&r" (tem)		\
		     : "r" (oparg), "1" (0));			\
	if (ret)						\
		goto label;					\
	*oval = oldval;						\
} while(0)

static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
		u32 __user *uaddr)
{
	if (can_do_masked_user_access())
		uaddr = masked_user_access_begin(uaddr);
	else if (!user_access_begin(uaddr, sizeof(u32)))
		return -EFAULT;

	switch (op) {
	case FUTEX_OP_SET:
		unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
		break;
	case FUTEX_OP_ADD:
		unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
		break;
	case FUTEX_OP_OR:
		unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
		break;
	case FUTEX_OP_ANDN:
		unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
		break;
	case FUTEX_OP_XOR:
		unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
		break;
	default:
		user_access_end();
		return -ENOSYS;
	}
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 newval)
{
	int ret = 0;

	if (can_do_masked_user_access())
		uaddr = masked_user_access_begin(uaddr);
	else if (!user_access_begin(uaddr, sizeof(u32)))
		return -EFAULT;
	asm volatile("\n"
		"1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
		"2:\n"
		_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
		: "r" (newval), "1" (oldval)
		: "memory"
	);
	user_access_end();
	*uval = oldval;
	return ret;
}

#endif
#endif /* _ASM_X86_FUTEX_H */
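/*
 * Semantics sketch (illustrative, not kernel code): stripped of the uaccess
 * begin/end and fault handling, the FUTEX_OP_ADD case above is a single
 * atomic fetch-and-add that reports the old value through *oval:
 *
 *	int futex_add_sketch(int oparg, int *oval, unsigned int *uaddr)
 *	{
 *		*oval = __atomic_fetch_add(uaddr, oparg, __ATOMIC_RELAXED);
 *		return 0;
 *	}
 *
 * FUTEX_OP_SET uses XCHG, while OR/ANDN/XOR go through unsafe_atomic_op2(),
 * a LOCK CMPXCHG retry loop, because x86 has no atomic fetch-or/and/xor
 * instruction that also returns the old value.
 */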
// SPDX-License-Identifier: GPL-2.0-or-later /* * Sonix sn9c201 sn9c202 library * * Copyright (C) 2012 Jean-Francois Moine <http://moinejf.free.fr> * Copyright (C) 2008-2009 microdia project <microdia@googlegroups.com> * Copyright (C) 2009 Brian Johnson <brijohn@gmail.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/input.h> #include "gspca.h" #include "jpeg.h" #include <linux/dmi.h> MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, microdia project <microdia@googlegroups.com>"); MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver"); MODULE_LICENSE("GPL"); /* * Pixel format private data */ #define SCALE_MASK 0x0f #define SCALE_160x120 0 #define SCALE_320x240 1 #define SCALE_640x480 2 #define SCALE_1280x1024 3 #define MODE_RAW 0x10 #define MODE_JPEG 0x20 #define MODE_SXGA 0x80 #define SENSOR_OV9650 0 #define SENSOR_OV9655 1 #define SENSOR_SOI968 2 #define SENSOR_OV7660 3 #define SENSOR_OV7670 4 #define SENSOR_MT9V011 5 #define SENSOR_MT9V111 6 #define SENSOR_MT9V112 7 #define SENSOR_MT9M001 8 #define SENSOR_MT9M111 9 #define SENSOR_MT9M112 10 #define SENSOR_HV7131R 11 #define SENSOR_MT9VPRB 12 /* camera flags */ #define HAS_NO_BUTTON 0x1 #define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */ #define FLIP_DETECT 0x4 #define HAS_LED_TORCH 0x8 /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; struct { /* color control cluster */ struct v4l2_ctrl *brightness; struct v4l2_ctrl *contrast; struct v4l2_ctrl *saturation; struct v4l2_ctrl *hue; }; struct { /* blue/red balance control cluster */ struct v4l2_ctrl *blue; struct v4l2_ctrl *red; }; struct { /* h/vflip control cluster */ struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; }; struct v4l2_ctrl *gamma; struct { /* autogain and exposure or gain control cluster */ struct v4l2_ctrl *autogain; struct v4l2_ctrl *exposure; struct v4l2_ctrl *gain; }; struct v4l2_ctrl *jpegqual; struct v4l2_ctrl *led_mode; struct work_struct work; u32 pktsz; /* (used by pkt_scan) */ u16 npkt; s8 nchg; u8 fmt; /* (used for JPEG QTAB update */ #define MIN_AVG_LUM 80 #define MAX_AVG_LUM 130 atomic_t avg_lum; u8 old_step; u8 older_step; u8 exposure_step; u8 i2c_addr; u8 i2c_intf; u8 sensor; u8 hstart; u8 vstart; u8 jpeg_hdr[JPEG_HDR_SZ]; u8 flags; }; static void qual_upd(struct work_struct *work); struct i2c_reg_u8 { u8 reg; u8 val; }; struct i2c_reg_u16 { u8 reg; u16 val; }; static const struct dmi_system_id flip_dmi_table[] = { { .ident = "MSI MS-1034", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "MS-1034"), DMI_MATCH(DMI_PRODUCT_VERSION, "0341") } }, { .ident = "MSI MS-1039", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"), } }, { .ident = "MSI MS-1632", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), DMI_MATCH(DMI_BOARD_NAME, "MS-1632") } }, { .ident = "MSI MS-1633X", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), DMI_MATCH(DMI_BOARD_NAME, "MS-1633X") } }, { .ident = "MSI MS-1635X", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), DMI_MATCH(DMI_BOARD_NAME, "MS-1635X") } }, { .ident = "ASUSTeK W7J", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_BOARD_NAME, "W7J ") 
} }, {} }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = SCALE_160x120 | MODE_JPEG}, {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_160x120 | MODE_RAW}, {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 240 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_160x120}, {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = SCALE_320x240 | MODE_JPEG}, {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 , .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_320x240 | MODE_RAW}, {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 480 * 240 , .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_320x240}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = SCALE_640x480 | MODE_JPEG}, {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_640x480 | MODE_RAW}, {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 960 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_640x480}, }; static const struct v4l2_pix_format sxga_mode[] = { {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = SCALE_160x120 | MODE_JPEG}, {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_160x120 | MODE_RAW}, {160, 120, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 240 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_160x120}, {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = SCALE_320x240 | MODE_JPEG}, {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 , .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_320x240 | MODE_RAW}, {320, 240, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 480 * 240 , .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_320x240}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = SCALE_640x480 | MODE_JPEG}, {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_640x480 | MODE_RAW}, {640, 480, V4L2_PIX_FMT_SN9C20X_I420, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 960 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_640x480}, {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 1024, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_1280x1024 | MODE_RAW | MODE_SXGA}, }; static const struct v4l2_pix_format mono_mode[] = { {160, 120, V4L2_PIX_FMT_GREY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_160x120 | MODE_RAW}, {320, 240, 
V4L2_PIX_FMT_GREY, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 , .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_320x240 | MODE_RAW}, {640, 480, V4L2_PIX_FMT_GREY, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_640x480 | MODE_RAW}, {1280, 1024, V4L2_PIX_FMT_GREY, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 1024, .colorspace = V4L2_COLORSPACE_SRGB, .priv = SCALE_1280x1024 | MODE_RAW | MODE_SXGA}, }; static const s16 hsv_red_x[] = { 41, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 81, 83, 85, 87, 88, 90, 92, 93, 95, 97, 98, 100, 101, 102, 104, 105, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 123, 124, 125, 125, 126, 127, 127, 128, 128, 129, 129, 129, 130, 130, 130, 130, 131, 131, 131, 131, 131, 131, 131, 131, 130, 130, 130, 130, 129, 129, 129, 128, 128, 127, 127, 126, 125, 125, 124, 123, 122, 122, 121, 120, 119, 118, 117, 116, 115, 114, 112, 111, 110, 109, 107, 106, 105, 103, 102, 101, 99, 98, 96, 94, 93, 91, 90, 88, 86, 84, 83, 81, 79, 77, 75, 74, 72, 70, 68, 66, 64, 62, 60, 58, 56, 54, 52, 49, 47, 45, 43, 41, 39, 36, 34, 32, 30, 28, 25, 23, 21, 19, 16, 14, 12, 9, 7, 5, 3, 0, -1, -3, -6, -8, -10, -12, -15, -17, -19, -22, -24, -26, -28, -30, -33, -35, -37, -39, -41, -44, -46, -48, -50, -52, -54, -56, -58, -60, -62, -64, -66, -68, -70, -72, -74, -76, -78, -80, -81, -83, -85, -87, -88, -90, -92, -93, -95, -97, -98, -100, -101, -102, -104, -105, -107, -108, -109, -110, -112, -113, -114, -115, -116, -117, -118, -119, -120, -121, -122, -123, -123, -124, -125, -125, -126, -127, -127, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -127, -127, -126, -125, -125, -124, -123, -122, -122, -121, -120, -119, -118, -117, -116, -115, -114, -112, -111, -110, -109, -107, -106, -105, -103, -102, -101, -99, -98, -96, -94, -93, -91, -90, -88, -86, -84, -83, -81, -79, -77, -75, -74, -72, -70, -68, -66, -64, -62, -60, -58, -56, -54, -52, -49, -47, -45, -43, -41, -39, -36, -34, -32, -30, -28, -25, -23, -21, -19, -16, -14, -12, -9, -7, -5, -3, 0, 1, 3, 6, 8, 10, 12, 15, 17, 19, 22, 24, 26, 28, 30, 33, 35, 37, 39, 41 }; static const s16 hsv_red_y[] = { 82, 80, 78, 76, 74, 73, 71, 69, 67, 65, 63, 61, 58, 56, 54, 52, 50, 48, 46, 44, 41, 39, 37, 35, 32, 30, 28, 26, 23, 21, 19, 16, 14, 12, 10, 7, 5, 3, 0, -1, -3, -6, -8, -10, -13, -15, -17, -19, -22, -24, -26, -29, -31, -33, -35, -38, -40, -42, -44, -46, -48, -51, -53, -55, -57, -59, -61, -63, -65, -67, -69, -71, -73, -75, -77, -79, -81, -82, -84, -86, -88, -89, -91, -93, -94, -96, -98, -99, -101, -102, -104, -105, -106, -108, -109, -110, -112, -113, -114, -115, -116, -117, -119, -120, -120, -121, -122, -123, -124, -125, -126, -126, -127, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -127, -127, -126, -125, -125, -124, -123, -122, -121, -120, -119, -118, -117, -116, -115, -114, -113, -111, -110, -109, -107, -106, -105, -103, -102, -100, -99, -97, -96, -94, -92, -91, -89, -87, -85, -84, -82, -80, -78, -76, -74, -73, -71, -69, -67, -65, -63, -61, -58, -56, -54, -52, -50, -48, -46, -44, -41, -39, -37, -35, -32, -30, -28, -26, -23, -21, -19, -16, -14, -12, -10, -7, -5, -3, 0, 1, 3, 6, 8, 10, 13, 15, 17, 19, 22, 24, 26, 29, 31, 33, 35, 38, 40, 42, 44, 46, 48, 51, 53, 
55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 82, 84, 86, 88, 89, 91, 93, 94, 96, 98, 99, 101, 102, 104, 105, 106, 108, 109, 110, 112, 113, 114, 115, 116, 117, 119, 120, 120, 121, 122, 123, 124, 125, 126, 126, 127, 128, 128, 129, 129, 130, 130, 131, 131, 131, 131, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 131, 131, 131, 130, 130, 130, 129, 129, 128, 127, 127, 126, 125, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 111, 110, 109, 107, 106, 105, 103, 102, 100, 99, 97, 96, 94, 92, 91, 89, 87, 85, 84, 82 }; static const s16 hsv_green_x[] = { -124, -124, -125, -125, -125, -125, -125, -125, -125, -126, -126, -125, -125, -125, -125, -125, -125, -124, -124, -124, -123, -123, -122, -122, -121, -121, -120, -120, -119, -118, -117, -117, -116, -115, -114, -113, -112, -111, -110, -109, -108, -107, -105, -104, -103, -102, -100, -99, -98, -96, -95, -93, -92, -91, -89, -87, -86, -84, -83, -81, -79, -77, -76, -74, -72, -70, -69, -67, -65, -63, -61, -59, -57, -55, -53, -51, -49, -47, -45, -43, -41, -39, -37, -35, -33, -30, -28, -26, -24, -22, -20, -18, -15, -13, -11, -9, -7, -4, -2, 0, 1, 3, 6, 8, 10, 12, 14, 17, 19, 21, 23, 25, 27, 29, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 71, 73, 75, 77, 78, 80, 82, 83, 85, 87, 88, 90, 91, 93, 94, 96, 97, 98, 100, 101, 102, 104, 105, 106, 107, 108, 109, 111, 112, 113, 113, 114, 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 122, 123, 123, 124, 124, 124, 125, 125, 125, 125, 125, 125, 125, 126, 126, 125, 125, 125, 125, 125, 125, 124, 124, 124, 123, 123, 122, 122, 121, 121, 120, 120, 119, 118, 117, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 105, 104, 103, 102, 100, 99, 98, 96, 95, 93, 92, 91, 89, 87, 86, 84, 83, 81, 79, 77, 76, 74, 72, 70, 69, 67, 65, 63, 61, 59, 57, 55, 53, 51, 49, 47, 45, 43, 41, 39, 37, 35, 33, 30, 28, 26, 24, 22, 20, 18, 15, 13, 11, 9, 7, 4, 2, 0, -1, -3, -6, -8, -10, -12, -14, -17, -19, -21, -23, -25, -27, -29, -32, -34, -36, -38, -40, -42, -44, -46, -48, -50, -52, -54, -56, -58, -60, -62, -64, -66, -68, -70, -71, -73, -75, -77, -78, -80, -82, -83, -85, -87, -88, -90, -91, -93, -94, -96, -97, -98, -100, -101, -102, -104, -105, -106, -107, -108, -109, -111, -112, -113, -113, -114, -115, -116, -117, -118, -118, -119, -120, -120, -121, -122, -122, -123, -123, -124, -124 }; static const s16 hsv_green_y[] = { -100, -99, -98, -97, -95, -94, -93, -91, -90, -89, -87, -86, -84, -83, -81, -80, -78, -76, -75, -73, -71, -70, -68, -66, -64, -63, -61, -59, -57, -55, -53, -51, -49, -48, -46, -44, -42, -40, -38, -36, -34, -32, -30, -27, -25, -23, -21, -19, -17, -15, -13, -11, -9, -7, -4, -2, 0, 1, 3, 5, 7, 9, 11, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 59, 61, 63, 65, 67, 68, 70, 72, 74, 75, 77, 78, 80, 82, 83, 85, 86, 88, 89, 90, 92, 93, 95, 96, 97, 98, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 112, 113, 114, 115, 115, 116, 116, 117, 117, 118, 118, 119, 119, 119, 120, 120, 120, 120, 120, 121, 121, 121, 121, 121, 121, 120, 120, 120, 120, 120, 119, 119, 119, 118, 118, 117, 117, 116, 116, 115, 114, 114, 113, 112, 111, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 100, 99, 98, 97, 95, 94, 93, 91, 90, 89, 87, 86, 84, 83, 81, 80, 78, 76, 75, 73, 71, 70, 68, 66, 64, 63, 61, 59, 57, 55, 53, 51, 49, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 4, 2, 0, -1, -3, -5, -7, -9, -11, -14, -16, -18, -20, -22, -24, -26, -28, -30, -32, -34, -36, -38, -40, -42, -44, -46, 
-48, -50, -52, -54, -56, -58, -59, -61, -63, -65, -67, -68, -70, -72, -74, -75, -77, -78, -80, -82, -83, -85, -86, -88, -89, -90, -92, -93, -95, -96, -97, -98, -100, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -112, -113, -114, -115, -115, -116, -116, -117, -117, -118, -118, -119, -119, -119, -120, -120, -120, -120, -120, -121, -121, -121, -121, -121, -121, -120, -120, -120, -120, -120, -119, -119, -119, -118, -118, -117, -117, -116, -116, -115, -114, -114, -113, -112, -111, -111, -110, -109, -108, -107, -106, -105, -104, -103, -102, -100 }; static const s16 hsv_blue_x[] = { 112, 113, 114, 114, 115, 116, 117, 117, 118, 118, 119, 119, 120, 120, 120, 121, 121, 121, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 121, 121, 121, 120, 120, 120, 119, 119, 118, 118, 117, 116, 116, 115, 114, 113, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 100, 99, 98, 97, 95, 94, 93, 91, 90, 88, 87, 85, 84, 82, 80, 79, 77, 76, 74, 72, 70, 69, 67, 65, 63, 61, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 19, 17, 15, 13, 11, 9, 7, 5, 2, 0, -1, -3, -5, -7, -9, -12, -14, -16, -18, -20, -22, -24, -26, -28, -31, -33, -35, -37, -39, -41, -43, -45, -47, -49, -51, -53, -54, -56, -58, -60, -62, -64, -66, -67, -69, -71, -73, -74, -76, -78, -79, -81, -83, -84, -86, -87, -89, -90, -92, -93, -94, -96, -97, -98, -99, -101, -102, -103, -104, -105, -106, -107, -108, -109, -110, -111, -112, -113, -114, -114, -115, -116, -117, -117, -118, -118, -119, -119, -120, -120, -120, -121, -121, -121, -122, -122, -122, -122, -122, -122, -122, -122, -122, -122, -122, -122, -121, -121, -121, -120, -120, -120, -119, -119, -118, -118, -117, -116, -116, -115, -114, -113, -113, -112, -111, -110, -109, -108, -107, -106, -105, -104, -103, -102, -100, -99, -98, -97, -95, -94, -93, -91, -90, -88, -87, -85, -84, -82, -80, -79, -77, -76, -74, -72, -70, -69, -67, -65, -63, -61, -60, -58, -56, -54, -52, -50, -48, -46, -44, -42, -40, -38, -36, -34, -32, -30, -28, -26, -24, -22, -19, -17, -15, -13, -11, -9, -7, -5, -2, 0, 1, 3, 5, 7, 9, 12, 14, 16, 18, 20, 22, 24, 26, 28, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 54, 56, 58, 60, 62, 64, 66, 67, 69, 71, 73, 74, 76, 78, 79, 81, 83, 84, 86, 87, 89, 90, 92, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112 }; static const s16 hsv_blue_y[] = { -11, -13, -15, -17, -19, -21, -23, -25, -27, -29, -31, -33, -35, -37, -39, -41, -43, -45, -46, -48, -50, -52, -54, -55, -57, -59, -61, -62, -64, -66, -67, -69, -71, -72, -74, -75, -77, -78, -80, -81, -83, -84, -86, -87, -88, -90, -91, -92, -93, -95, -96, -97, -98, -99, -100, -101, -102, -103, -104, -105, -106, -106, -107, -108, -109, -109, -110, -111, -111, -112, -112, -113, -113, -114, -114, -114, -115, -115, -115, -115, -116, -116, -116, -116, -116, -116, -116, -116, -116, -115, -115, -115, -115, -114, -114, -114, -113, -113, -112, -112, -111, -111, -110, -110, -109, -108, -108, -107, -106, -105, -104, -103, -102, -101, -100, -99, -98, -97, -96, -95, -94, -93, -91, -90, -89, -88, -86, -85, -84, -82, -81, -79, -78, -76, -75, -73, -71, -70, -68, -67, -65, -63, -62, -60, -58, -56, -55, -53, -51, -49, -47, -45, -44, -42, -40, -38, -36, -34, -32, -30, -28, -26, -24, -22, -20, -18, -16, -14, -12, -10, -8, -6, -4, -2, 0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 46, 48, 50, 52, 54, 55, 57, 59, 61, 62, 64, 66, 67, 69, 71, 72, 74, 75, 77, 78, 80, 81, 83, 84, 86, 87, 88, 90, 91, 92, 93, 95, 96, 97, 98, 99, 
100, 101, 102, 103, 104, 105, 106, 106, 107, 108, 109, 109, 110, 111, 111, 112, 112, 113, 113, 114, 114, 114, 115, 115, 115, 115, 116, 116, 116, 116, 116, 116, 116, 116, 116, 115, 115, 115, 115, 114, 114, 114, 113, 113, 112, 112, 111, 111, 110, 110, 109, 108, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 91, 90, 89, 88, 86, 85, 84, 82, 81, 79, 78, 76, 75, 73, 71, 70, 68, 67, 65, 63, 62, 60, 58, 56, 55, 53, 51, 49, 47, 45, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0, -1, -3, -5, -7, -9, -11 }; static const u16 bridge_init[][2] = { {0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c}, {0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40}, {0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10}, {0x106b, 0x08}, {0x1188, 0x87}, {0x11a1, 0x00}, {0x11a2, 0x00}, {0x11a3, 0x6a}, {0x11a4, 0x50}, {0x11ab, 0x00}, {0x11ac, 0x00}, {0x11ad, 0x50}, {0x11ae, 0x3c}, {0x118a, 0x04}, {0x0395, 0x04}, {0x11b8, 0x3a}, {0x118b, 0x0e}, {0x10f7, 0x05}, {0x10f8, 0x14}, {0x10fa, 0xff}, {0x10f9, 0x00}, {0x11ba, 0x0a}, {0x11a5, 0x2d}, {0x11a6, 0x2d}, {0x11a7, 0x3a}, {0x11a8, 0x05}, {0x11a9, 0x04}, {0x11aa, 0x3f}, {0x11af, 0x28}, {0x11b0, 0xd8}, {0x11b1, 0x14}, {0x11b2, 0xec}, {0x11b3, 0x32}, {0x11b4, 0xdd}, {0x11b5, 0x32}, {0x11b6, 0xdd}, {0x10e0, 0x2c}, {0x11bc, 0x40}, {0x11bd, 0x01}, {0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f}, {0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f}, {0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01}, {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80}, {0x1007, 0x00} }; /* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */ static const u8 ov_gain[] = { 0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */, 0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */, 0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */, 0x30 /* 4x */, 0x31 /* 4.25x */, 0x32 /* 4.5x */, 0x33 /* 4.75x */, 0x34 /* 5x */, 0x35 /* 5.25x */, 0x36 /* 5.5x */, 0x37 /* 5.75x */, 0x38 /* 6x */, 0x39 /* 6.25x */, 0x3a /* 6.5x */, 0x3b /* 6.75x */, 0x3c /* 7x */, 0x3d /* 7.25x */, 0x3e /* 7.5x */, 0x3f /* 7.75x */, 0x70 /* 8x */ }; /* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */ static const u16 micron1_gain[] = { /* 1x 1.25x 1.5x 1.75x */ 0x0020, 0x0028, 0x0030, 0x0038, /* 2x 2.25x 2.5x 2.75x */ 0x00a0, 0x00a4, 0x00a8, 0x00ac, /* 3x 3.25x 3.5x 3.75x */ 0x00b0, 0x00b4, 0x00b8, 0x00bc, /* 4x 4.25x 4.5x 4.75x */ 0x00c0, 0x00c4, 0x00c8, 0x00cc, /* 5x 5.25x 5.5x 5.75x */ 0x00d0, 0x00d4, 0x00d8, 0x00dc, /* 6x 6.25x 6.5x 6.75x */ 0x00e0, 0x00e4, 0x00e8, 0x00ec, /* 7x 7.25x 7.5x 7.75x */ 0x00f0, 0x00f4, 0x00f8, 0x00fc, /* 8x */ 0x01c0 }; /* mt9m001 sensor uses a different gain formula then other micron sensors */ /* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */ static const u16 micron2_gain[] = { /* 1x 1.25x 1.5x 1.75x */ 0x0008, 0x000a, 0x000c, 0x000e, /* 2x 2.25x 2.5x 2.75x */ 0x0010, 0x0012, 0x0014, 0x0016, /* 3x 3.25x 3.5x 3.75x */ 0x0018, 0x001a, 0x001c, 0x001e, /* 4x 4.25x 4.5x 4.75x */ 0x0020, 0x0051, 0x0052, 0x0053, /* 5x 5.25x 5.5x 5.75x */ 0x0054, 0x0055, 0x0056, 0x0057, /* 6x 6.25x 6.5x 6.75x */ 0x0058, 0x0059, 0x005a, 0x005b, /* 7x 7.25x 7.5x 7.75x */ 0x005c, 0x005d, 0x005e, 0x005f, /* 8x */ 0x0060 }; /* Gain = .5 + bit[7:0] / 16 */ static const u8 hv7131r_gain[] = { 0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */, 0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */, 0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x */, 0x34 /* 3.75x */, 0x38 /* 4x */, 0x3c /* 4.25x */, 0x40 
/* 4.5x */, 0x44 /* 4.75x */, 0x48 /* 5x */, 0x4c /* 5.25x */, 0x50 /* 5.5x */, 0x54 /* 5.75x */, 0x58 /* 6x */, 0x5c /* 6.25x */, 0x60 /* 6.5x */, 0x64 /* 6.75x */, 0x68 /* 7x */, 0x6c /* 7.25x */, 0x70 /* 7.5x */, 0x74 /* 7.75x */, 0x78 /* 8x */ }; static const struct i2c_reg_u8 soi968_init[] = { {0x0c, 0x00}, {0x0f, 0x1f}, {0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00}, {0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c}, {0x37, 0x04}, {0x45, 0x04}, {0x47, 0xff}, {0x3e, 0x00}, {0x3f, 0x00}, {0x3b, 0x20}, {0x3a, 0x96}, {0x3d, 0x0a}, {0x14, 0x8e}, {0x13, 0x8b}, {0x12, 0x40}, {0x17, 0x13}, {0x18, 0x63}, {0x19, 0x01}, {0x1a, 0x79}, {0x32, 0x24}, {0x03, 0x00}, {0x11, 0x40}, {0x2a, 0x10}, {0x2b, 0xe0}, {0x10, 0x32}, {0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80}, }; static const struct i2c_reg_u8 ov7660_init[] = { {0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3}, {0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40}, {0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a}, /* HDG Set hstart and hstop, datasheet default 0x11, 0x61, using 0x10, 0x61 and sd->hstart, vstart = 3, fixes ugly colored borders */ {0x17, 0x10}, {0x18, 0x61}, {0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43}, {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0x00}, {0x2e, 0x00}, {0x01, 0x78}, {0x02, 0x50}, }; static const struct i2c_reg_u8 ov7670_init[] = { {0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01}, {0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00}, {0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0}, {0xa2, 0x02}, {0x13, 0xe0}, {0x00, 0x00}, {0x10, 0x00}, {0x0d, 0x40}, {0x14, 0x28}, {0xa5, 0x05}, {0xab, 0x07}, {0x24, 0x95}, {0x25, 0x33}, {0x26, 0xe3}, {0x9f, 0x75}, {0xa0, 0x65}, {0xa1, 0x0b}, {0xa6, 0xd8}, {0xa7, 0xd8}, {0xa8, 0xf0}, {0xa9, 0x90}, {0xaa, 0x94}, {0x13, 0xe5}, {0x0e, 0x61}, {0x0f, 0x4b}, {0x16, 0x02}, {0x1e, 0x27}, {0x21, 0x02}, {0x22, 0x91}, {0x29, 0x07}, {0x33, 0x0b}, {0x35, 0x0b}, {0x37, 0x1d}, {0x38, 0x71}, {0x39, 0x2a}, {0x3c, 0x78}, {0x4d, 0x40}, {0x4e, 0x20}, {0x69, 0x00}, {0x74, 0x19}, {0x8d, 0x4f}, {0x8e, 0x00}, {0x8f, 0x00}, {0x90, 0x00}, {0x91, 0x00}, {0x96, 0x00}, {0x9a, 0x80}, {0xb0, 0x84}, {0xb1, 0x0c}, {0xb2, 0x0e}, {0xb3, 0x82}, {0xb8, 0x0a}, {0x43, 0x0a}, {0x44, 0xf0}, {0x45, 0x20}, {0x46, 0x7d}, {0x47, 0x29}, {0x48, 0x4a}, {0x59, 0x8c}, {0x5a, 0xa5}, {0x5b, 0xde}, {0x5c, 0x96}, {0x5d, 0x66}, {0x5e, 0x10}, {0x6c, 0x0a}, {0x6d, 0x55}, {0x6e, 0x11}, {0x6f, 0x9e}, {0x6a, 0x40}, {0x01, 0x40}, {0x02, 0x40}, {0x13, 0xe7}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x02}, {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x55, 0x0a}, {0x56, 0x55}, {0x57, 0x80}, {0x58, 0x9e}, {0x41, 0x08}, {0x3f, 0x02}, {0x75, 0x03}, {0x76, 0x63}, {0x4c, 0x04}, {0x77, 0x06}, {0x3d, 0x02}, {0x4b, 0x09}, {0xc9, 0x30}, {0x41, 0x08}, {0x56, 0x48}, {0x34, 0x11}, {0xa4, 0x88}, {0x96, 0x00}, {0x97, 0x30}, {0x98, 0x20}, {0x99, 0x30}, {0x9a, 0x84}, {0x9b, 0x29}, {0x9c, 0x03}, {0x9d, 0x99}, {0x9e, 0x7f}, {0x78, 0x04}, {0x79, 0x01}, {0xc8, 0xf0}, {0x79, 0x0f}, {0xc8, 0x00}, {0x79, 0x10}, {0xc8, 0x7e}, {0x79, 0x0a}, {0xc8, 0x80}, {0x79, 0x0b}, {0xc8, 0x01}, {0x79, 0x0c}, {0xc8, 0x0f}, {0x79, 0x0d}, {0xc8, 0x20}, {0x79, 0x09}, {0xc8, 0x80}, {0x79, 0x02}, {0xc8, 0xc0}, {0x79, 0x03}, {0xc8, 0x40}, {0x79, 0x05}, {0xc8, 0x30}, {0x79, 0x26}, {0x62, 0x20}, {0x63, 0x00}, {0x64, 0x06}, {0x65, 0x00}, {0x66, 0x05}, {0x94, 0x05}, {0x95, 0x0a}, {0x17, 0x13}, {0x18, 0x01}, {0x19, 0x02}, {0x1a, 0x7a}, {0x46, 0x59}, {0x47, 0x30}, {0x58, 0x9a}, {0x59, 0x84}, {0x5a, 0x91}, {0x5b, 0x57}, {0x5c, 0x75}, {0x5d, 0x6d}, {0x5e, 0x13}, {0x64, 0x07}, {0x94, 0x07}, {0x95, 0x0d}, {0xa6, 0xdf}, {0xa7, 0xdf}, {0x48, 0x4d}, {0x51, 0x00}, {0x6b, 
0x0a}, {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, {0x92, 0x00}, {0x93, 0x00}, {0x55, 0x0a}, {0x56, 0x60}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x58, 0x9a}, {0x4f, 0x6e}, {0x50, 0x70}, {0x51, 0x00}, {0x52, 0x1d}, {0x53, 0x56}, {0x54, 0x73}, {0x58, 0x9a}, {0x3f, 0x01}, {0x7b, 0x03}, {0x7c, 0x09}, {0x7d, 0x16}, {0x7e, 0x38}, {0x7f, 0x47}, {0x80, 0x53}, {0x81, 0x5e}, {0x82, 0x6a}, {0x83, 0x74}, {0x84, 0x80}, {0x85, 0x8c}, {0x86, 0x9b}, {0x87, 0xb2}, {0x88, 0xcc}, {0x89, 0xe5}, {0x7a, 0x24}, {0x3b, 0x00}, {0x9f, 0x76}, {0xa0, 0x65}, {0x13, 0xe2}, {0x6b, 0x0a}, {0x11, 0x80}, {0x2a, 0x00}, {0x2b, 0x00}, {0x92, 0x00}, {0x93, 0x00}, }; static const struct i2c_reg_u8 ov9650_init[] = { {0x00, 0x00}, {0x01, 0x78}, {0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03}, {0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00}, {0x09, 0x01}, {0x0c, 0x00}, {0x0d, 0x00}, {0x0e, 0xa0}, {0x0f, 0x52}, {0x10, 0x7c}, {0x11, 0x80}, {0x12, 0x45}, {0x13, 0xc2}, {0x14, 0x2e}, {0x15, 0x00}, {0x16, 0x07}, {0x17, 0x24}, {0x18, 0xc5}, {0x19, 0x00}, {0x1a, 0x3c}, {0x1b, 0x00}, {0x1e, 0x04}, {0x1f, 0x00}, {0x24, 0x78}, {0x25, 0x68}, {0x26, 0xd4}, {0x27, 0x80}, {0x28, 0x80}, {0x29, 0x30}, {0x2a, 0x00}, {0x2b, 0x00}, {0x2c, 0x80}, {0x2d, 0x00}, {0x2e, 0x00}, {0x2f, 0x00}, {0x30, 0x08}, {0x31, 0x30}, {0x32, 0x84}, {0x33, 0xe2}, {0x34, 0xbf}, {0x35, 0x81}, {0x36, 0xf9}, {0x37, 0x00}, {0x38, 0x93}, {0x39, 0x50}, {0x3a, 0x01}, {0x3b, 0x01}, {0x3c, 0x73}, {0x3d, 0x19}, {0x3e, 0x0b}, {0x3f, 0x80}, {0x40, 0xc1}, {0x41, 0x00}, {0x42, 0x08}, {0x67, 0x80}, {0x68, 0x80}, {0x69, 0x40}, {0x6a, 0x00}, {0x6b, 0x0a}, {0x8b, 0x06}, {0x8c, 0x20}, {0x8d, 0x00}, {0x8e, 0x00}, {0x8f, 0xdf}, {0x92, 0x00}, {0x93, 0x00}, {0x94, 0x88}, {0x95, 0x88}, {0x96, 0x04}, {0xa1, 0x00}, {0xa5, 0x80}, {0xa8, 0x80}, {0xa9, 0xb8}, {0xaa, 0x92}, {0xab, 0x0a}, }; static const struct i2c_reg_u8 ov9655_init[] = { {0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08}, {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d}, {0x35, 0x00}, {0x38, 0x12}, {0x0f, 0x42}, {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, {0x4d, 0xdc}, {0x4e, 0xdc}, {0x6c, 0x04}, {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02}, {0x8a, 0x23}, {0x90, 0x7e}, {0x91, 0x7c}, {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60}, {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04}, {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80}, {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf}, {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b}, {0xbf, 0x3a}, {0xc1, 0xc8}, {0xc2, 0x01}, {0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0}, {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x2d, 0x00}, {0x2e, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x09}, {0x17, 0x16}, {0x18, 0x6e}, {0x19, 0x01}, {0x1a, 0x3e}, {0x32, 0x09}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03}, {0x00, 0x13}, }; static const struct i2c_reg_u16 mt9v112_init[] = { {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020}, {0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b}, {0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001}, {0x05, 0x0000}, {0x06, 0x340c}, {0x3b, 0x042a}, {0x3c, 0x0400}, {0xf0, 0x0002}, {0x2e, 0x0c58}, {0x5b, 0x0001}, {0xc8, 0x9f0b}, {0xf0, 0x0001}, {0x9b, 0x5300}, 
{0xf0, 0x0000}, {0x2b, 0x0020}, {0x2c, 0x002a}, {0x2d, 0x0032}, {0x2e, 0x0020}, {0x09, 0x01dc}, {0x01, 0x000c}, {0x02, 0x0020}, {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, {0x05, 0x0098}, {0x20, 0x0703}, {0x09, 0x01f2}, {0x2b, 0x00a0}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0}, {0x01, 0x000c}, {0x02, 0x0020}, {0x03, 0x01e0}, {0x04, 0x0280}, {0x06, 0x000c}, {0x05, 0x0098}, {0x09, 0x01c1}, {0x2b, 0x00ae}, {0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae}, }; static const struct i2c_reg_u16 mt9v111_init[] = { {0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000}, {0x01, 0x0001}, {0x05, 0x0004}, {0x2d, 0xe0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064}, {0x06, 0x600e}, {0x08, 0x0480}, {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e7}, {0x04, 0x0287}, {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002}, {0x08, 0x0008}, {0x0e, 0x0008}, {0x20, 0x0000} }; static const struct i2c_reg_u16 mt9v011_init[] = { {0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000}, {0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006}, {0x0d, 0x0002}, {0x0a, 0x0000}, {0x0b, 0x0000}, {0x0c, 0x0000}, {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000}, {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x0000}, {0x13, 0x0000}, {0x14, 0x0000}, {0x15, 0x0000}, {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000}, {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000}, {0x1c, 0x0000}, {0x1d, 0x0000}, {0x32, 0x0000}, {0x20, 0x1101}, {0x21, 0x0000}, {0x22, 0x0000}, {0x23, 0x0000}, {0x24, 0x0000}, {0x25, 0x0000}, {0x26, 0x0000}, {0x27, 0x0024}, {0x2f, 0xf7b0}, {0x30, 0x0005}, {0x31, 0x0000}, {0x32, 0x0000}, {0x33, 0x0000}, {0x34, 0x0100}, {0x3d, 0x068f}, {0x40, 0x01e0}, {0x41, 0x00d1}, {0x44, 0x0082}, {0x5a, 0x0000}, {0x5b, 0x0000}, {0x5c, 0x0000}, {0x5d, 0x0000}, {0x5e, 0x0000}, {0x5f, 0xa31d}, {0x62, 0x0611}, {0x0a, 0x0000}, {0x06, 0x0029}, {0x05, 0x0009}, {0x20, 0x1101}, {0x20, 0x1101}, {0x09, 0x0064}, {0x07, 0x0003}, {0x2b, 0x0033}, {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x0033}, {0x07, 0x0002}, {0x06, 0x0000}, {0x06, 0x0029}, {0x05, 0x0009}, }; static const struct i2c_reg_u16 mt9m001_init[] = { {0x0d, 0x0001}, {0x0d, 0x0000}, {0x04, 0x0500}, /* hres = 1280 */ {0x03, 0x0400}, /* vres = 1024 */ {0x20, 0x1100}, {0x06, 0x0010}, {0x2b, 0x0024}, {0x2e, 0x0024}, {0x35, 0x0024}, {0x2d, 0x0020}, {0x2c, 0x0020}, {0x09, 0x0ad4}, {0x35, 0x0057}, }; static const struct i2c_reg_u16 mt9m111_init[] = { {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008}, {0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300}, {0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e}, {0xf0, 0x0000}, }; static const struct i2c_reg_u16 mt9m112_init[] = { {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008}, {0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300}, {0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e}, {0xf0, 0x0000}, }; static const struct i2c_reg_u8 hv7131r_init[] = { {0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08}, {0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0}, {0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08}, {0x01, 0x08}, {0x01, 0x08}, {0x25, 0x07}, {0x26, 0xc3}, {0x27, 0x50}, {0x30, 0x62}, {0x31, 0x10}, {0x32, 0x06}, {0x33, 0x10}, {0x20, 0x00}, {0x21, 0xd0}, {0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08}, }; static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) { struct usb_device *dev = gspca_dev->dev; int result; if (gspca_dev->usb_err < 0) return; result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, reg, 0x00, gspca_dev->usb_buf, length, 500); if (unlikely(result < 0 || result != length)) { pr_err("Read register %02x failed %d\n", reg, result); 
gspca_dev->usb_err = result; /* * Make sure the buffer is zeroed to avoid uninitialized * values. */ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } static void reg_w(struct gspca_dev *gspca_dev, u16 reg, const u8 *buffer, int length) { struct usb_device *dev = gspca_dev->dev; int result; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, buffer, length); result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x08, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, reg, 0x00, gspca_dev->usb_buf, length, 500); if (unlikely(result < 0 || result != length)) { pr_err("Write register %02x failed %d\n", reg, result); gspca_dev->usb_err = result; } } static void reg_w1(struct gspca_dev *gspca_dev, u16 reg, const u8 value) { reg_w(gspca_dev, reg, &value, 1); } static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buffer) { int i; reg_w(gspca_dev, 0x10c0, buffer, 8); for (i = 0; i < 5; i++) { reg_r(gspca_dev, 0x10c0, 1); if (gspca_dev->usb_err < 0) return; if (gspca_dev->usb_buf[0] & 0x04) { if (gspca_dev->usb_buf[0] & 0x08) { pr_err("i2c_w error\n"); gspca_dev->usb_err = -EIO; } return; } msleep(10); } pr_err("i2c_w reg %02x no response\n", buffer[2]); /* gspca_dev->usb_err = -EIO; fixme: may occur */ } static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val) { struct sd *sd = (struct sd *) gspca_dev; u8 row[8]; /* * from the point of view of the bridge, the length * includes the address */ row[0] = sd->i2c_intf | (2 << 4); row[1] = sd->i2c_addr; row[2] = reg; row[3] = val; row[4] = 0x00; row[5] = 0x00; row[6] = 0x00; row[7] = 0x10; i2c_w(gspca_dev, row); } static void i2c_w1_buf(struct gspca_dev *gspca_dev, const struct i2c_reg_u8 *buf, int sz) { while (--sz >= 0) { i2c_w1(gspca_dev, buf->reg, buf->val); buf++; } } static void i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val) { struct sd *sd = (struct sd *) gspca_dev; u8 row[8]; /* * from the point of view of the bridge, the length * includes the address */ row[0] = sd->i2c_intf | (3 << 4); row[1] = sd->i2c_addr; row[2] = reg; row[3] = val >> 8; row[4] = val; row[5] = 0x00; row[6] = 0x00; row[7] = 0x10; i2c_w(gspca_dev, row); } static void i2c_w2_buf(struct gspca_dev *gspca_dev, const struct i2c_reg_u16 *buf, int sz) { while (--sz >= 0) { i2c_w2(gspca_dev, buf->reg, buf->val); buf++; } } static void i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val) { struct sd *sd = (struct sd *) gspca_dev; u8 row[8]; row[0] = sd->i2c_intf | (1 << 4); row[1] = sd->i2c_addr; row[2] = reg; row[3] = 0; row[4] = 0; row[5] = 0; row[6] = 0; row[7] = 0x10; i2c_w(gspca_dev, row); row[0] = sd->i2c_intf | (1 << 4) | 0x02; row[2] = 0; i2c_w(gspca_dev, row); reg_r(gspca_dev, 0x10c2, 5); *val = gspca_dev->usb_buf[4]; } static void i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val) { struct sd *sd = (struct sd *) gspca_dev; u8 row[8]; row[0] = sd->i2c_intf | (1 << 4); row[1] = sd->i2c_addr; row[2] = reg; row[3] = 0; row[4] = 0; row[5] = 0; row[6] = 0; row[7] = 0x10; i2c_w(gspca_dev, row); row[0] = sd->i2c_intf | (2 << 4) | 0x02; row[2] = 0; i2c_w(gspca_dev, row); reg_r(gspca_dev, 0x10c2, 5); *val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; } static void ov9650_init_sensor(struct gspca_dev *gspca_dev) { u16 id; struct sd *sd = (struct sd *) gspca_dev; i2c_r2(gspca_dev, 0x1c, &id); if (gspca_dev->usb_err < 0) return; if (id != 0x7fa2) { pr_err("sensor id for ov9650 doesn't match (0x%04x)\n", id); gspca_dev->usb_err = -ENODEV; return; } i2c_w1(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(200); i2c_w1_buf(gspca_dev, ov9650_init, 
ARRAY_SIZE(ov9650_init)); if (gspca_dev->usb_err < 0) pr_err("OV9650 sensor initialization failed\n"); sd->hstart = 1; sd->vstart = 7; } static void ov9655_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w1(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(200); i2c_w1_buf(gspca_dev, ov9655_init, ARRAY_SIZE(ov9655_init)); if (gspca_dev->usb_err < 0) pr_err("OV9655 sensor initialization failed\n"); sd->hstart = 1; sd->vstart = 2; } static void soi968_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w1(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(200); i2c_w1_buf(gspca_dev, soi968_init, ARRAY_SIZE(soi968_init)); if (gspca_dev->usb_err < 0) pr_err("SOI968 sensor initialization failed\n"); sd->hstart = 60; sd->vstart = 11; } static void ov7660_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w1(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(200); i2c_w1_buf(gspca_dev, ov7660_init, ARRAY_SIZE(ov7660_init)); if (gspca_dev->usb_err < 0) pr_err("OV7660 sensor initialization failed\n"); sd->hstart = 3; sd->vstart = 3; } static void ov7670_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w1(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(200); i2c_w1_buf(gspca_dev, ov7670_init, ARRAY_SIZE(ov7670_init)); if (gspca_dev->usb_err < 0) pr_err("OV7670 sensor initialization failed\n"); sd->hstart = 0; sd->vstart = 1; } static void mt9v_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 value; sd->i2c_addr = 0x5d; i2c_r2(gspca_dev, 0xff, &value); if (gspca_dev->usb_err >= 0 && value == 0x8243) { i2c_w2_buf(gspca_dev, mt9v011_init, ARRAY_SIZE(mt9v011_init)); if (gspca_dev->usb_err < 0) { pr_err("MT9V011 sensor initialization failed\n"); return; } sd->hstart = 2; sd->vstart = 2; sd->sensor = SENSOR_MT9V011; pr_info("MT9V011 sensor detected\n"); return; } gspca_dev->usb_err = 0; sd->i2c_addr = 0x5c; i2c_w2(gspca_dev, 0x01, 0x0004); i2c_r2(gspca_dev, 0xff, &value); if (gspca_dev->usb_err >= 0 && value == 0x823a) { i2c_w2_buf(gspca_dev, mt9v111_init, ARRAY_SIZE(mt9v111_init)); if (gspca_dev->usb_err < 0) { pr_err("MT9V111 sensor initialization failed\n"); return; } sd->hstart = 2; sd->vstart = 2; sd->sensor = SENSOR_MT9V111; pr_info("MT9V111 sensor detected\n"); return; } gspca_dev->usb_err = 0; sd->i2c_addr = 0x5d; i2c_w2(gspca_dev, 0xf0, 0x0000); if (gspca_dev->usb_err < 0) { gspca_dev->usb_err = 0; sd->i2c_addr = 0x48; i2c_w2(gspca_dev, 0xf0, 0x0000); } i2c_r2(gspca_dev, 0x00, &value); if (gspca_dev->usb_err >= 0 && value == 0x1229) { i2c_w2_buf(gspca_dev, mt9v112_init, ARRAY_SIZE(mt9v112_init)); if (gspca_dev->usb_err < 0) { pr_err("MT9V112 sensor initialization failed\n"); return; } sd->hstart = 6; sd->vstart = 2; sd->sensor = SENSOR_MT9V112; pr_info("MT9V112 sensor detected\n"); return; } gspca_dev->usb_err = -ENODEV; } static void mt9m112_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w2_buf(gspca_dev, mt9m112_init, ARRAY_SIZE(mt9m112_init)); if (gspca_dev->usb_err < 0) pr_err("MT9M112 sensor initialization failed\n"); sd->hstart = 0; sd->vstart = 2; } static void mt9m111_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w2_buf(gspca_dev, mt9m111_init, ARRAY_SIZE(mt9m111_init)); if (gspca_dev->usb_err < 0) pr_err("MT9M111 sensor initialization failed\n"); sd->hstart = 0; sd->vstart = 2; } static void mt9m001_init_sensor(struct 
gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 id; i2c_r2(gspca_dev, 0x00, &id); if (gspca_dev->usb_err < 0) return; /* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */ switch (id) { case 0x8411: case 0x8421: pr_info("MT9M001 color sensor detected\n"); break; case 0x8431: pr_info("MT9M001 mono sensor detected\n"); break; default: pr_err("No MT9M001 chip detected, ID = %x\n\n", id); gspca_dev->usb_err = -ENODEV; return; } i2c_w2_buf(gspca_dev, mt9m001_init, ARRAY_SIZE(mt9m001_init)); if (gspca_dev->usb_err < 0) pr_err("MT9M001 sensor initialization failed\n"); sd->hstart = 1; sd->vstart = 1; } static void hv7131r_init_sensor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; i2c_w1_buf(gspca_dev, hv7131r_init, ARRAY_SIZE(hv7131r_init)); if (gspca_dev->usb_err < 0) pr_err("HV7131R Sensor initialization failed\n"); sd->hstart = 0; sd->vstart = 1; } static void set_cmatrix(struct gspca_dev *gspca_dev, s32 brightness, s32 contrast, s32 satur, s32 hue) { s32 hue_coord, hue_index = 180 + hue; u8 cmatrix[21]; memset(cmatrix, 0, sizeof(cmatrix)); cmatrix[2] = (contrast * 0x25 / 0x100) + 0x26; cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25; cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25; cmatrix[18] = brightness - 0x80; hue_coord = (hsv_red_x[hue_index] * satur) >> 8; cmatrix[6] = hue_coord; cmatrix[7] = (hue_coord >> 8) & 0x0f; hue_coord = (hsv_red_y[hue_index] * satur) >> 8; cmatrix[8] = hue_coord; cmatrix[9] = (hue_coord >> 8) & 0x0f; hue_coord = (hsv_green_x[hue_index] * satur) >> 8; cmatrix[10] = hue_coord; cmatrix[11] = (hue_coord >> 8) & 0x0f; hue_coord = (hsv_green_y[hue_index] * satur) >> 8; cmatrix[12] = hue_coord; cmatrix[13] = (hue_coord >> 8) & 0x0f; hue_coord = (hsv_blue_x[hue_index] * satur) >> 8; cmatrix[14] = hue_coord; cmatrix[15] = (hue_coord >> 8) & 0x0f; hue_coord = (hsv_blue_y[hue_index] * satur) >> 8; cmatrix[16] = hue_coord; cmatrix[17] = (hue_coord >> 8) & 0x0f; reg_w(gspca_dev, 0x10e1, cmatrix, 21); } static void set_gamma(struct gspca_dev *gspca_dev, s32 val) { u8 gamma[17]; u8 gval = val * 0xb8 / 0x100; gamma[0] = 0x0a; gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8); gamma[2] = 0x25 + (gval * (0xee - 0x25) / 0xb8); gamma[3] = 0x37 + (gval * (0xfa - 0x37) / 0xb8); gamma[4] = 0x45 + (gval * (0xfc - 0x45) / 0xb8); gamma[5] = 0x55 + (gval * (0xfb - 0x55) / 0xb8); gamma[6] = 0x65 + (gval * (0xfc - 0x65) / 0xb8); gamma[7] = 0x74 + (gval * (0xfd - 0x74) / 0xb8); gamma[8] = 0x83 + (gval * (0xfe - 0x83) / 0xb8); gamma[9] = 0x92 + (gval * (0xfc - 0x92) / 0xb8); gamma[10] = 0xa1 + (gval * (0xfc - 0xa1) / 0xb8); gamma[11] = 0xb0 + (gval * (0xfc - 0xb0) / 0xb8); gamma[12] = 0xbf + (gval * (0xfb - 0xbf) / 0xb8); gamma[13] = 0xce + (gval * (0xfb - 0xce) / 0xb8); gamma[14] = 0xdf + (gval * (0xfd - 0xdf) / 0xb8); gamma[15] = 0xea + (gval * (0xf9 - 0xea) / 0xb8); gamma[16] = 0xf5; reg_w(gspca_dev, 0x1190, gamma, 17); } static void set_redblue(struct gspca_dev *gspca_dev, s32 blue, s32 red) { reg_w1(gspca_dev, 0x118c, red); reg_w1(gspca_dev, 0x118f, blue); } static void set_hvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip) { u8 value, tslb; u16 value2; struct sd *sd = (struct sd *) gspca_dev; if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) { hflip = !hflip; vflip = !vflip; } switch (sd->sensor) { case SENSOR_OV7660: value = 0x01; if (hflip) value |= 0x20; if (vflip) { value |= 0x10; sd->vstart = 2; } else { sd->vstart = 3; } reg_w1(gspca_dev, 0x1182, sd->vstart); i2c_w1(gspca_dev, 0x1e, value); 
break; case SENSOR_OV9650: i2c_r1(gspca_dev, 0x1e, &value); value &= ~0x30; tslb = 0x01; if (hflip) value |= 0x20; if (vflip) { value |= 0x10; tslb = 0x49; } i2c_w1(gspca_dev, 0x1e, value); i2c_w1(gspca_dev, 0x3a, tslb); break; case SENSOR_MT9V111: case SENSOR_MT9V011: i2c_r2(gspca_dev, 0x20, &value2); value2 &= ~0xc0a0; if (hflip) value2 |= 0x8080; if (vflip) value2 |= 0x4020; i2c_w2(gspca_dev, 0x20, value2); break; case SENSOR_MT9M112: case SENSOR_MT9M111: case SENSOR_MT9V112: i2c_r2(gspca_dev, 0x20, &value2); value2 &= ~0x0003; if (hflip) value2 |= 0x0002; if (vflip) value2 |= 0x0001; i2c_w2(gspca_dev, 0x20, value2); break; case SENSOR_HV7131R: i2c_r1(gspca_dev, 0x01, &value); value &= ~0x03; if (vflip) value |= 0x01; if (hflip) value |= 0x02; i2c_w1(gspca_dev, 0x01, value); break; } } static void set_exposure(struct gspca_dev *gspca_dev, s32 expo) { struct sd *sd = (struct sd *) gspca_dev; u8 exp[8] = {sd->i2c_intf, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}; int expo2; if (gspca_dev->streaming) exp[7] = 0x1e; switch (sd->sensor) { case SENSOR_OV7660: case SENSOR_OV7670: case SENSOR_OV9655: case SENSOR_OV9650: if (expo > 547) expo2 = 547; else expo2 = expo; exp[0] |= (2 << 4); exp[2] = 0x10; /* AECH */ exp[3] = expo2 >> 2; exp[7] = 0x10; i2c_w(gspca_dev, exp); exp[2] = 0x04; /* COM1 */ exp[3] = expo2 & 0x0003; exp[7] = 0x10; i2c_w(gspca_dev, exp); expo -= expo2; exp[7] = 0x1e; exp[0] |= (3 << 4); exp[2] = 0x2d; /* ADVFL & ADVFH */ exp[3] = expo; exp[4] = expo >> 8; break; case SENSOR_MT9M001: case SENSOR_MT9V112: case SENSOR_MT9V011: exp[0] |= (3 << 4); exp[2] = 0x09; exp[3] = expo >> 8; exp[4] = expo; break; case SENSOR_HV7131R: exp[0] |= (4 << 4); exp[2] = 0x25; exp[3] = expo >> 5; exp[4] = expo << 3; exp[5] = 0; break; default: return; } i2c_w(gspca_dev, exp); } static void set_gain(struct gspca_dev *gspca_dev, s32 g) { struct sd *sd = (struct sd *) gspca_dev; u8 gain[8] = {sd->i2c_intf, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}; if (gspca_dev->streaming) gain[7] = 0x15; /* or 1d ? */ switch (sd->sensor) { case SENSOR_OV7660: case SENSOR_OV7670: case SENSOR_SOI968: case SENSOR_OV9655: case SENSOR_OV9650: gain[0] |= (2 << 4); gain[3] = ov_gain[g]; break; case SENSOR_MT9V011: gain[0] |= (3 << 4); gain[2] = 0x35; gain[3] = micron1_gain[g] >> 8; gain[4] = micron1_gain[g]; break; case SENSOR_MT9V112: gain[0] |= (3 << 4); gain[2] = 0x2f; gain[3] = micron1_gain[g] >> 8; gain[4] = micron1_gain[g]; break; case SENSOR_MT9M001: gain[0] |= (3 << 4); gain[2] = 0x2f; gain[3] = micron2_gain[g] >> 8; gain[4] = micron2_gain[g]; break; case SENSOR_HV7131R: gain[0] |= (2 << 4); gain[2] = 0x30; gain[3] = hv7131r_gain[g]; break; default: return; } i2c_w(gspca_dev, gain); } static void set_led_mode(struct gspca_dev *gspca_dev, s32 val) { reg_w1(gspca_dev, 0x1007, 0x60); reg_w1(gspca_dev, 0x1006, val ? 
0x40 : 0x00);
}

static void set_quality(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	jpeg_set_qual(sd->jpeg_hdr, val);
	reg_w1(gspca_dev, 0x1061, 0x01);	/* stop transfer */
	reg_w1(gspca_dev, 0x10e0, sd->fmt | 0x20); /* write QTAB */
	reg_w(gspca_dev, 0x1100, &sd->jpeg_hdr[JPEG_QT0_OFFSET], 64);
	reg_w(gspca_dev, 0x1140, &sd->jpeg_hdr[JPEG_QT1_OFFSET], 64);
	reg_w1(gspca_dev, 0x1061, 0x03);	/* restart transfer */
	reg_w1(gspca_dev, 0x10e0, sd->fmt);
	sd->fmt ^= 0x0c;			/* invert QTAB use + write */
	reg_w1(gspca_dev, 0x10e0, sd->fmt);
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
static int sd_dbg_g_register(struct gspca_dev *gspca_dev,
			     struct v4l2_dbg_register *reg)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg->size = 1;
	switch (reg->match.addr) {
	case 0:
		if (reg->reg < 0x1000 || reg->reg > 0x11ff)
			return -EINVAL;
		reg_r(gspca_dev, reg->reg, 1);
		reg->val = gspca_dev->usb_buf[0];
		return gspca_dev->usb_err;
	case 1:
		if (sd->sensor >= SENSOR_MT9V011 &&
		    sd->sensor <= SENSOR_MT9M112) {
			i2c_r2(gspca_dev, reg->reg, (u16 *) &reg->val);
			reg->size = 2;
		} else {
			i2c_r1(gspca_dev, reg->reg, (u8 *) &reg->val);
		}
		return gspca_dev->usb_err;
	}
	return -EINVAL;
}

static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
			     const struct v4l2_dbg_register *reg)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (reg->match.addr) {
	case 0:
		if (reg->reg < 0x1000 || reg->reg > 0x11ff)
			return -EINVAL;
		reg_w1(gspca_dev, reg->reg, reg->val);
		return gspca_dev->usb_err;
	case 1:
		if (sd->sensor >= SENSOR_MT9V011 &&
		    sd->sensor <= SENSOR_MT9M112) {
			i2c_w2(gspca_dev, reg->reg, reg->val);
		} else {
			i2c_w1(gspca_dev, reg->reg, reg->val);
		}
		return gspca_dev->usb_err;
	}
	return -EINVAL;
}

static int sd_chip_info(struct gspca_dev *gspca_dev,
			struct v4l2_dbg_chip_info *chip)
{
	if (chip->match.addr > 1)
		return -EINVAL;
	if (chip->match.addr == 1)
		strscpy(chip->name, "sensor", sizeof(chip->name));
	return 0;
}
#endif

static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;

	cam = &gspca_dev->cam;
	cam->needs_full_bandwidth = 1;

	sd->sensor = id->driver_info >> 8;
	sd->i2c_addr = id->driver_info;
	sd->flags = id->driver_info >> 16;
	sd->i2c_intf = 0x80;		/* i2c 100 Kb/s */

	switch (sd->sensor) {
	case SENSOR_MT9M112:
	case SENSOR_MT9M111:
	case SENSOR_OV9650:
	case SENSOR_SOI968:
		cam->cam_mode = sxga_mode;
		cam->nmodes = ARRAY_SIZE(sxga_mode);
		break;
	case SENSOR_MT9M001:
		cam->cam_mode = mono_mode;
		cam->nmodes = ARRAY_SIZE(mono_mode);
		break;
	case SENSOR_HV7131R:
		sd->i2c_intf = 0x81;	/* i2c 400 Kb/s */
		fallthrough;
	default:
		cam->cam_mode = vga_mode;
		cam->nmodes = ARRAY_SIZE(vga_mode);
		break;
	}

	sd->old_step = 0;
	sd->older_step = 0;
	sd->exposure_step = 16;

	INIT_WORK(&sd->work, qual_upd);

	return 0;
}

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *)gspca_dev;

	gspca_dev->usb_err = 0;

	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	/* color control cluster */
	case V4L2_CID_BRIGHTNESS:
		set_cmatrix(gspca_dev, sd->brightness->val, sd->contrast->val,
			sd->saturation->val, sd->hue->val);
		break;
	case V4L2_CID_GAMMA:
		set_gamma(gspca_dev, ctrl->val);
		break;
	/* blue/red balance cluster */
	case V4L2_CID_BLUE_BALANCE:
		set_redblue(gspca_dev, sd->blue->val, sd->red->val);
		break;
	/* h/vflip cluster */
	case V4L2_CID_HFLIP:
		set_hvflip(gspca_dev, sd->hflip->val, sd->vflip->val);
		break;
	/* standalone exposure control */
	case V4L2_CID_EXPOSURE:
set_exposure(gspca_dev, ctrl->val); break; /* standalone gain control */ case V4L2_CID_GAIN: set_gain(gspca_dev, ctrl->val); break; /* autogain + exposure or gain control cluster */ case V4L2_CID_AUTOGAIN: if (sd->sensor == SENSOR_SOI968) set_gain(gspca_dev, sd->gain->val); else set_exposure(gspca_dev, sd->exposure->val); break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: set_quality(gspca_dev, ctrl->val); break; case V4L2_CID_FLASH_LED_MODE: set_led_mode(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 13); sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 127); sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 127); sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, -180, 180, 1, 0); sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAMMA, 0, 255, 1, 0x10); sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28); sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28); if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 && sd->sensor != SENSOR_MT9VPRB) { sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); } if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB && sd->sensor != SENSOR_MT9M112 && sd->sensor != SENSOR_MT9M111 && sd->sensor != SENSOR_MT9V111) sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 0x1780, 1, 0x33); if (sd->sensor != SENSOR_MT9VPRB && sd->sensor != SENSOR_MT9M112 && sd->sensor != SENSOR_MT9M111 && sd->sensor != SENSOR_MT9V111) { sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 28, 1, 0); sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); } sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80); if (sd->flags & HAS_LED_TORCH) sd->led_mode = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_FLASH_LED_MODE, V4L2_FLASH_LED_MODE_TORCH, ~0x5, V4L2_FLASH_LED_MODE_NONE); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(4, &sd->brightness); v4l2_ctrl_cluster(2, &sd->blue); if (sd->hflip) v4l2_ctrl_cluster(2, &sd->hflip); if (sd->autogain) { if (sd->sensor == SENSOR_SOI968) /* this sensor doesn't have the exposure control and autogain is clustered with gain instead. This works because sd->exposure == NULL. */ v4l2_ctrl_auto_cluster(3, &sd->autogain, 0, false); else /* Otherwise autogain is clustered with exposure. 
*/ v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false); } return 0; } static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; u8 value; u8 i2c_init[9] = { 0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03 }; for (i = 0; i < ARRAY_SIZE(bridge_init); i++) { value = bridge_init[i][1]; reg_w(gspca_dev, bridge_init[i][0], &value, 1); if (gspca_dev->usb_err < 0) { pr_err("Device initialization failed\n"); return gspca_dev->usb_err; } } if (sd->flags & LED_REVERSE) reg_w1(gspca_dev, 0x1006, 0x00); else reg_w1(gspca_dev, 0x1006, 0x20); reg_w(gspca_dev, 0x10c0, i2c_init, 9); if (gspca_dev->usb_err < 0) { pr_err("Device initialization failed\n"); return gspca_dev->usb_err; } switch (sd->sensor) { case SENSOR_OV9650: ov9650_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("OV9650 sensor detected\n"); break; case SENSOR_OV9655: ov9655_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("OV9655 sensor detected\n"); break; case SENSOR_SOI968: soi968_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("SOI968 sensor detected\n"); break; case SENSOR_OV7660: ov7660_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("OV7660 sensor detected\n"); break; case SENSOR_OV7670: ov7670_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("OV7670 sensor detected\n"); break; case SENSOR_MT9VPRB: mt9v_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("MT9VPRB sensor detected\n"); break; case SENSOR_MT9M111: mt9m111_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("MT9M111 sensor detected\n"); break; case SENSOR_MT9M112: mt9m112_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("MT9M112 sensor detected\n"); break; case SENSOR_MT9M001: mt9m001_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; break; case SENSOR_HV7131R: hv7131r_init_sensor(gspca_dev); if (gspca_dev->usb_err < 0) break; pr_info("HV7131R sensor detected\n"); break; default: pr_err("Unsupported sensor\n"); gspca_dev->usb_err = -ENODEV; } return gspca_dev->usb_err; } static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode) { struct sd *sd = (struct sd *) gspca_dev; u8 value; switch (sd->sensor) { case SENSOR_SOI968: if (mode & MODE_SXGA) { i2c_w1(gspca_dev, 0x17, 0x1d); i2c_w1(gspca_dev, 0x18, 0xbd); i2c_w1(gspca_dev, 0x19, 0x01); i2c_w1(gspca_dev, 0x1a, 0x81); i2c_w1(gspca_dev, 0x12, 0x00); sd->hstart = 140; sd->vstart = 19; } else { i2c_w1(gspca_dev, 0x17, 0x13); i2c_w1(gspca_dev, 0x18, 0x63); i2c_w1(gspca_dev, 0x19, 0x01); i2c_w1(gspca_dev, 0x1a, 0x79); i2c_w1(gspca_dev, 0x12, 0x40); sd->hstart = 60; sd->vstart = 11; } break; case SENSOR_OV9650: if (mode & MODE_SXGA) { i2c_w1(gspca_dev, 0x17, 0x1b); i2c_w1(gspca_dev, 0x18, 0xbc); i2c_w1(gspca_dev, 0x19, 0x01); i2c_w1(gspca_dev, 0x1a, 0x82); i2c_r1(gspca_dev, 0x12, &value); i2c_w1(gspca_dev, 0x12, value & 0x07); } else { i2c_w1(gspca_dev, 0x17, 0x24); i2c_w1(gspca_dev, 0x18, 0xc5); i2c_w1(gspca_dev, 0x19, 0x00); i2c_w1(gspca_dev, 0x1a, 0x3c); i2c_r1(gspca_dev, 0x12, &value); i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40); } break; case SENSOR_MT9M112: case SENSOR_MT9M111: if (mode & MODE_SXGA) { i2c_w2(gspca_dev, 0xf0, 0x0002); i2c_w2(gspca_dev, 0xc8, 0x970b); i2c_w2(gspca_dev, 0xf0, 0x0000); } else { i2c_w2(gspca_dev, 0xf0, 0x0002); i2c_w2(gspca_dev, 0xc8, 0x8000); i2c_w2(gspca_dev, 0xf0, 0x0000); } break; } } static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct usb_interface *intf; u32 
flags = gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv; /* * When using the SN9C20X_I420 fmt the sn9c20x needs more bandwidth * than our regular bandwidth calculations reserve, so we force the * use of a specific altsetting when using the SN9C20X_I420 fmt. */ if (!(flags & (MODE_RAW | MODE_JPEG))) { intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); if (intf->num_altsetting != 9) { pr_warn("sn9c20x camera with unknown number of alt settings (%d), please report!\n", intf->num_altsetting); gspca_dev->alt = intf->num_altsetting; return 0; } switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ gspca_dev->alt = 2; break; case 320: /* 320x240 */ gspca_dev->alt = 6; break; default: /* >= 640x480 */ gspca_dev->alt = 9; break; } } return 0; } #define HW_WIN(mode, hstart, vstart) \ ((const u8 []){hstart, 0, vstart, 0, \ (mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \ (mode & MODE_SXGA ? 1024 >> 3 : 480 >> 3)}) #define CLR_WIN(width, height) \ ((const u8 [])\ {0, width >> 2, 0, height >> 1,\ ((width >> 10) & 0x01) | ((height >> 8) & 0x6)}) static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; int width = gspca_dev->pixfmt.width; int height = gspca_dev->pixfmt.height; u8 fmt, scale = 0; jpeg_define(sd->jpeg_hdr, height, width, 0x21); jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual)); if (mode & MODE_RAW) fmt = 0x2d; else if (mode & MODE_JPEG) fmt = 0x24; else fmt = 0x2f; /* YUV 420 */ sd->fmt = fmt; switch (mode & SCALE_MASK) { case SCALE_1280x1024: scale = 0xc0; pr_info("Set 1280x1024\n"); break; case SCALE_640x480: scale = 0x80; pr_info("Set 640x480\n"); break; case SCALE_320x240: scale = 0x90; pr_info("Set 320x240\n"); break; case SCALE_160x120: scale = 0xa0; pr_info("Set 160x120\n"); break; } configure_sensor_output(gspca_dev, mode); reg_w(gspca_dev, 0x1100, &sd->jpeg_hdr[JPEG_QT0_OFFSET], 64); reg_w(gspca_dev, 0x1140, &sd->jpeg_hdr[JPEG_QT1_OFFSET], 64); reg_w(gspca_dev, 0x10fb, CLR_WIN(width, height), 5); reg_w(gspca_dev, 0x1180, HW_WIN(mode, sd->hstart, sd->vstart), 6); reg_w1(gspca_dev, 0x1189, scale); reg_w1(gspca_dev, 0x10e0, fmt); set_cmatrix(gspca_dev, v4l2_ctrl_g_ctrl(sd->brightness), v4l2_ctrl_g_ctrl(sd->contrast), v4l2_ctrl_g_ctrl(sd->saturation), v4l2_ctrl_g_ctrl(sd->hue)); set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); if (sd->gain) set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain)); if (sd->exposure) set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure)); if (sd->hflip) set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip), v4l2_ctrl_g_ctrl(sd->vflip)); reg_w1(gspca_dev, 0x1007, 0x20); reg_w1(gspca_dev, 0x1061, 0x03); /* if JPEG, prepare the compression quality update */ if (mode & MODE_JPEG) { sd->pktsz = sd->npkt = 0; sd->nchg = 0; } if (sd->led_mode) v4l2_ctrl_s_ctrl(sd->led_mode, 0); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { reg_w1(gspca_dev, 0x1007, 0x00); reg_w1(gspca_dev, 0x1061, 0x01); } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; mutex_unlock(&gspca_dev->usb_lock); flush_work(&sd->work); mutex_lock(&gspca_dev->usb_lock); } static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum) { struct sd *sd = (struct sd *) gspca_dev; s32 cur_exp = 
v4l2_ctrl_g_ctrl(sd->exposure); s32 max = sd->exposure->maximum - sd->exposure_step; s32 min = sd->exposure->minimum + sd->exposure_step; s16 new_exp; /* * some hardcoded values are present * like those for maximal/minimal exposure * and exposure steps */ if (avg_lum < MIN_AVG_LUM) { if (cur_exp > max) return; new_exp = cur_exp + sd->exposure_step; if (new_exp > max) new_exp = max; if (new_exp < min) new_exp = min; v4l2_ctrl_s_ctrl(sd->exposure, new_exp); sd->older_step = sd->old_step; sd->old_step = 1; if (sd->old_step ^ sd->older_step) sd->exposure_step /= 2; else sd->exposure_step += 2; } if (avg_lum > MAX_AVG_LUM) { if (cur_exp < min) return; new_exp = cur_exp - sd->exposure_step; if (new_exp > max) new_exp = max; if (new_exp < min) new_exp = min; v4l2_ctrl_s_ctrl(sd->exposure, new_exp); sd->older_step = sd->old_step; sd->old_step = 0; if (sd->old_step ^ sd->older_step) sd->exposure_step /= 2; else sd->exposure_step += 2; } } static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum) { struct sd *sd = (struct sd *) gspca_dev; s32 cur_gain = v4l2_ctrl_g_ctrl(sd->gain); if (avg_lum < MIN_AVG_LUM && cur_gain < sd->gain->maximum) v4l2_ctrl_s_ctrl(sd->gain, cur_gain + 1); if (avg_lum > MAX_AVG_LUM && cur_gain > sd->gain->minimum) v4l2_ctrl_s_ctrl(sd->gain, cur_gain - 1); } static void sd_dqcallback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int avg_lum; if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain)) return; avg_lum = atomic_read(&sd->avg_lum); if (sd->sensor == SENSOR_SOI968) do_autogain(gspca_dev, avg_lum); else do_autoexposure(gspca_dev, avg_lum); } /* JPEG quality update */ /* This function is executed from a work queue. */ static void qual_upd(struct work_struct *work) { struct sd *sd = container_of(work, struct sd, work); struct gspca_dev *gspca_dev = &sd->gspca_dev; s32 qual = v4l2_ctrl_g_ctrl(sd->jpegqual); /* To protect gspca_dev->usb_buf and gspca_dev->usb_err */ mutex_lock(&gspca_dev->usb_lock); gspca_dbg(gspca_dev, D_STREAM, "qual_upd %d%%\n", qual); gspca_dev->usb_err = 0; set_quality(gspca_dev, qual); mutex_unlock(&gspca_dev->usb_lock); } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet */ int len) /* interrupt packet length */ { struct sd *sd = (struct sd *) gspca_dev; if (!(sd->flags & HAS_NO_BUTTON) && len == 1) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); return 0; } return -EINVAL; } #endif /* check the JPEG compression */ static void transfer_check(struct gspca_dev *gspca_dev, u8 *data) { struct sd *sd = (struct sd *) gspca_dev; int new_qual, r; new_qual = 0; /* if USB error, discard the frame and decrease the quality */ if (data[6] & 0x08) { /* USB FIFO full */ gspca_dev->last_packet_type = DISCARD_PACKET; new_qual = -5; } else { /* else, compute the filling rate and a new JPEG quality */ r = (sd->pktsz * 100) / (sd->npkt * gspca_dev->urb[0]->iso_frame_desc[0].length); if (r >= 85) new_qual = -3; else if (r < 75) new_qual = 2; } if (new_qual != 0) { sd->nchg += new_qual; if (sd->nchg < -6 || sd->nchg >= 12) { /* Note: we are in interrupt context, so we can't use v4l2_ctrl_g/s_ctrl here. Access the value directly instead. 
*/ s32 curqual = sd->jpegqual->cur.val; sd->nchg = 0; new_qual += curqual; if (new_qual < sd->jpegqual->minimum) new_qual = sd->jpegqual->minimum; else if (new_qual > sd->jpegqual->maximum) new_qual = sd->jpegqual->maximum; if (new_qual != curqual) { sd->jpegqual->cur.val = new_qual; schedule_work(&sd->work); } } } else { sd->nchg = 0; } sd->pktsz = sd->npkt = 0; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int avg_lum, is_jpeg; static const u8 frame_header[] = { 0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96 }; is_jpeg = (sd->fmt & 0x03) == 0; if (len >= 64 && memcmp(data, frame_header, 6) == 0) { avg_lum = ((data[35] >> 2) & 3) | (data[20] << 2) | (data[19] << 10); avg_lum += ((data[35] >> 4) & 3) | (data[22] << 2) | (data[21] << 10); avg_lum += ((data[35] >> 6) & 3) | (data[24] << 2) | (data[23] << 10); avg_lum += (data[36] & 3) | (data[26] << 2) | (data[25] << 10); avg_lum += ((data[36] >> 2) & 3) | (data[28] << 2) | (data[27] << 10); avg_lum += ((data[36] >> 4) & 3) | (data[30] << 2) | (data[29] << 10); avg_lum += ((data[36] >> 6) & 3) | (data[32] << 2) | (data[31] << 10); avg_lum += ((data[44] >> 4) & 3) | (data[34] << 2) | (data[33] << 10); avg_lum >>= 9; atomic_set(&sd->avg_lum, avg_lum); if (is_jpeg) transfer_check(gspca_dev, data); gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); len -= 64; if (len == 0) return; data += 64; } if (gspca_dev->last_packet_type == LAST_PACKET) { if (is_jpeg) { gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } else { gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); } } else { /* if JPEG, count the packets and their size */ if (is_jpeg) { sd->npkt++; sd->pktsz += len; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = KBUILD_MODNAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif .dq_callback = sd_dqcallback, #ifdef CONFIG_VIDEO_ADV_DEBUG .set_register = sd_dbg_s_register, .get_register = sd_dbg_g_register, .get_chip_info = sd_chip_info, #endif }; #define SN9C20X(sensor, i2c_addr, flags) \ .driver_info = ((flags & 0xff) << 16) \ | (SENSOR_ ## sensor << 8) \ | (i2c_addr) static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, HAS_LED_TORCH)}, {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, {USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)}, {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, LED_REVERSE)}, {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, (FLIP_DETECT | HAS_NO_BUTTON))}, {USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)}, {USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)}, {USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)}, {USB_DEVICE(0x0c45, 0x6270), SN9C20X(MT9VPRB, 0x00, 0)}, {USB_DEVICE(0x0c45, 0x627b), SN9C20X(OV7660, 0x21, FLIP_DETECT)}, {USB_DEVICE(0x0c45, 0x627c), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0x0c45, 0x627f), SN9C20X(OV9650, 0x30, 0)}, {USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)}, {USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)}, {USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)}, {USB_DEVICE(0x0c45, 
0x628c), SN9C20X(MT9M112, 0x5d, 0)}, {USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)}, {USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)}, {USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)}, {USB_DEVICE(0x0c45, 0x62b0), SN9C20X(MT9VPRB, 0x00, 0)}, {USB_DEVICE(0x0c45, 0x62b3), SN9C20X(OV9655, 0x30, LED_REVERSE)}, {USB_DEVICE(0x0c45, 0x62bb), SN9C20X(OV7660, 0x21, LED_REVERSE)}, {USB_DEVICE(0x0c45, 0x62bc), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)}, {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)}, {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)}, {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)}, {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0xa168, 0x0618), SN9C20X(HV7131R, 0x11, 0)}, {USB_DEVICE(0xa168, 0x0614), SN9C20X(MT9M111, 0x5d, 0)}, {USB_DEVICE(0xa168, 0x0615), SN9C20X(MT9M111, 0x5d, 0)}, {USB_DEVICE(0xa168, 0x0617), SN9C20X(MT9M111, 0x5d, 0)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = KBUILD_MODNAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
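/*
 * Illustrative sketch only, not part of the driver above: how the 8-byte
 * command block that i2c_w1()/i2c_w2() write to bridge register 0x10c0 is
 * laid out.  The helper name is hypothetical; the field meanings come from
 * the driver's own comment ("the length includes the address") and from the
 * values i2c_w1()/i2c_w2() actually store.
 */
static void __maybe_unused sn9c20x_pack_i2c_write(u8 cmd[8], u8 i2c_intf,
						  u8 i2c_addr, u8 reg,
						  const u8 *data, int len)
{
	memset(cmd, 0, 8);
	/* the length nibble counts the register address plus the data bytes */
	cmd[0] = i2c_intf | ((len + 1) << 4);
	cmd[1] = i2c_addr;		/* sensor i2c address */
	cmd[2] = reg;			/* sensor register */
	memcpy(&cmd[3], data, len > 4 ? 4 : len); /* at most 4 data bytes */
	cmd[7] = 0x10;			/* value the driver uses to start the transfer */
}

/*
 * Example: the one-byte sensor reset used by the *_init_sensor() functions,
 * i2c_w1(gspca_dev, 0x12, 0x80), packs to
 *   { i2c_intf | 0x20, i2c_addr, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 },
 * which is exactly the row[] that i2c_w1() builds before handing it to
 * i2c_w().
 */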
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_LABELS_H
#define _NF_CONNTRACK_LABELS_H

#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <uapi/linux/netfilter/xt_connlabel.h>

#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)

struct nf_conn_labels {
	unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
};

/* Can't use nf_ct_ext_find(), flow dissector cannot use symbols
 * exported by nf_conntrack module.
 */
static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	struct nf_ct_ext *ext = ct->ext;

	if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
		return NULL;

	return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
#else
	return NULL;
#endif
}

static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	struct net *net = nf_ct_net(ct);

	if (atomic_read(&net->ct.labels_used) == 0)
		return NULL;

	return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
#else
	return NULL;
#endif
}

int nf_connlabels_replace(struct nf_conn *ct,
			  const u32 *data, const u32 *mask, unsigned int words);

#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif
#endif /* _NF_CONNTRACK_LABELS_H */
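/*
 * Illustrative sketch only, not part of this header: the intended calling
 * pattern for the connlabel interface above.  A label user first reserves
 * the highest bit it will touch with nf_connlabels_get(); until someone has
 * done so, net->ct.labels_used stays zero and nf_ct_labels_ext_add()
 * refuses to attach the extension.  The function names and the bit numbers
 * below are hypothetical.
 */
static inline int example_labels_start(struct net *net)
{
	/* declare that bits 0..15 of the connlabel bitmap may be used */
	return nf_connlabels_get(net, 15);
}

static inline int example_labels_set_bit0(struct nf_conn *ct)
{
	u32 value[1] = { 0x1 };		/* bit 0 set */
	u32 mask[1] = { 0x1 };		/* only bit 0 may change */

	if (!nf_ct_labels_ext_add(ct))
		return -ENOSPC;

	/* update the masked bits; a single 32-bit word is passed here */
	return nf_connlabels_replace(ct, value, mask, 1);
}

static inline void example_labels_stop(struct net *net)
{
	nf_connlabels_put(net);		/* drop the reservation from _start() */
}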
9 9 9 9 9 14 14 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 | /* * linux/fs/nls/nls_cp874.c * * Charset cp874 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2026, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 0x90*/ 0x0000, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 0xa0*/ 0x00a0, 0x0e01, 0x0e02, 0x0e03, 0x0e04, 0x0e05, 0x0e06, 0x0e07, 0x0e08, 0x0e09, 0x0e0a, 0x0e0b, 0x0e0c, 0x0e0d, 0x0e0e, 0x0e0f, /* 0xb0*/ 0x0e10, 0x0e11, 0x0e12, 0x0e13, 0x0e14, 0x0e15, 0x0e16, 0x0e17, 0x0e18, 0x0e19, 0x0e1a, 0x0e1b, 0x0e1c, 0x0e1d, 0x0e1e, 0x0e1f, /* 0xc0*/ 0x0e20, 0x0e21, 0x0e22, 0x0e23, 0x0e24, 0x0e25, 0x0e26, 0x0e27, 0x0e28, 0x0e29, 0x0e2a, 0x0e2b, 0x0e2c, 0x0e2d, 0x0e2e, 0x0e2f, /* 0xd0*/ 0x0e30, 0x0e31, 0x0e32, 0x0e33, 0x0e34, 0x0e35, 0x0e36, 0x0e37, 0x0e38, 0x0e39, 0x0e3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0e3f, /* 0xe0*/ 0x0e40, 0x0e41, 0x0e42, 0x0e43, 0x0e44, 0x0e45, 0x0e46, 0x0e47, 0x0e48, 0x0e49, 0x0e4a, 0x0e4b, 0x0e4c, 0x0e4d, 0x0e4e, 0x0e4f, /* 0xf0*/ 0x0e50, 0x0e51, 0x0e52, 0x0e53, 0x0e54, 0x0e55, 
0x0e56, 0x0e57, 0x0e58, 0x0e59, 0x0e5a, 0x0e5b, 0x0000, 0x0000, 0x0000, 0x0000, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char page0e[256] = { 0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x08-0x0f */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x91, 0x92, 0x00, 0x00, 0x93, 0x94, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page0e, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 
0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 
0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp874", .alias = "tis-620", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_cp874(void) { return register_nls(&table); } static void __exit exit_nls_cp874(void) { unregister_nls(&table); } module_init(init_nls_cp874) module_exit(exit_nls_cp874) MODULE_DESCRIPTION("NLS Thai charset (CP874, TIS-620)"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS_NLS(tis-620); |
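/*
 * Illustrative sketch only, not part of the module above: how the two-level
 * lookup in uni2char() resolves a code point.  The Unicode high byte picks a
 * page (page_uni2charset[0x0e] == page0e for Thai), the low byte indexes it,
 * so U+0E01 maps to the cp874 byte 0xa1, and char2uni() inverts this through
 * charset2uni[0xa1] == 0x0e01.  The function name below is hypothetical.
 */
static int __maybe_unused example_cp874_roundtrip(void)
{
	unsigned char c;
	wchar_t uni;

	if (uni2char(0x0e01, &c, 1) != 1 || c != 0xa1)
		return -EINVAL;
	if (char2uni(&c, 1, &uni) != 1 || uni != 0x0e01)
		return -EINVAL;
	return 0;
}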
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 | /* IPv6-specific defines for netfilter. * (C)1998 Rusty Russell -- This code is GPL. * (C)1999 David Jeffery * this header was blatantly ripped from netfilter_ipv4.h * it's amazing what adding a bunch of 6s can do =8^) */ #ifndef __LINUX_IP6_NETFILTER_H #define __LINUX_IP6_NETFILTER_H #include <uapi/linux/netfilter_ipv6.h> #include <net/tcp.h> /* Check for an extension */ static inline int nf_ip6_ext_hdr(u8 nexthdr) { return (nexthdr == IPPROTO_HOPOPTS) || (nexthdr == IPPROTO_ROUTING) || (nexthdr == IPPROTO_FRAGMENT) || (nexthdr == IPPROTO_ESP) || (nexthdr == IPPROTO_AH) || (nexthdr == IPPROTO_NONE) || (nexthdr == IPPROTO_DSTOPTS); } /* Extra routing may needed on local out, as the QUEUE target never returns * control to the table. */ struct ip6_rt_info { struct in6_addr daddr; struct in6_addr saddr; u_int32_t mark; }; struct nf_queue_entry; struct nf_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. */ struct nf_ipv6_ops { #if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); int (*cookie_v6_check)(const struct ipv6hdr *iph, const struct tcphdr *th); #endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)); #endif }; #ifdef CONFIG_NETFILTER #include <net/addrconf.h> extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return rcu_dereference(nf_ipv6_ops); } static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return 1; return v6_ops->chk_addr(net, addr, dev, strict); #elif IS_BUILTIN(CONFIG_IPV6) return ipv6_chk_addr(net, addr, dev, strict); #else return 1; #endif } int __nf_ip6_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); static inline int 
nf_ip6_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); if (v6ops) return v6ops->route(net, dst, fl, strict); return -EHOSTUNREACH; #endif #if IS_BUILTIN(CONFIG_IPV6) return __nf_ip6_route(net, dst, fl, strict); #else return -EHOSTUNREACH; #endif } #include <net/netfilter/ipv6/nf_defrag_ipv6.h> int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)); static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return 1; return v6_ops->br_fragment(net, sk, skb, data, output); #elif IS_BUILTIN(CONFIG_IPV6) return br_ip6_fragment(net, sk, skb, data, output); #else return 1; #endif } int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return -EHOSTUNREACH; return v6_ops->route_me_harder(net, sk, skb); #elif IS_BUILTIN(CONFIG_IPV6) return ip6_route_me_harder(net, sk, skb); #else return -EHOSTUNREACH; #endif } static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) return v6_ops->cookie_init_sequence(iph, th, mssp); #elif IS_BUILTIN(CONFIG_IPV6) return __cookie_v6_init_sequence(iph, th, mssp); #endif #endif return 0; } static inline int nf_cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) return v6_ops->cookie_v6_check(iph, th); #elif IS_BUILTIN(CONFIG_IPV6) return __cookie_v6_check(iph, th); #endif #endif return 0; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen); int ipv6_netfilter_init(void); void ipv6_netfilter_fini(void); #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; } #endif /* CONFIG_NETFILTER */ #endif /*__LINUX_IP6_NETFILTER_H*/ |
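/*
 * Hedged sketch of how a caller might use the nf_ipv6_chk_addr() wrapper
 * declared above, so the same code builds whether CONFIG_IPV6 is built-in,
 * modular, or disabled. This is an illustrative netfilter hook fragment,
 * not taken from this header; module init/exit plumbing is omitted.
 */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/ipv6.h>
#include <net/net_namespace.h>

static unsigned int demo_in_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);

	/* Accept only packets whose destination is a local address. */
	if (!nf_ipv6_chk_addr(state->net, &ip6h->daddr, state->in, 1))
		return NF_DROP;
	return NF_ACCEPT;
}

static const struct nf_hook_ops demo_ops = {
	.hook		= demo_in_hook,
	.pf		= NFPROTO_IPV6,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP6_PRI_FILTER,
};

static int __init demo_hook_init(void)
{
	/* A real module would also unregister this in its exit path. */
	return nf_register_net_hook(&init_net, &demo_ops);
}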
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler_types.h>
#include
<linux/errno.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/gfp.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/ipc_namespace.h> #include <linux/kdev_t.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/namei.h> #include <linux/magic.h> #include <linux/major.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mount.h> #include <linux/fs_parser.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock_types.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/user_namespace.h> #include <linux/xarray.h> #include <uapi/linux/android/binder.h> #include <uapi/linux/android/binderfs.h> #include "binder_internal.h" #define FIRST_INODE 1 #define SECOND_INODE 2 #define INODE_OFFSET 3 #define BINDERFS_MAX_MINOR (1U << MINORBITS) /* Ensure that the initial ipc namespace always has devices available. */ #define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) static dev_t binderfs_dev; static DEFINE_MUTEX(binderfs_minors_mutex); static DEFINE_IDA(binderfs_minors); enum binderfs_param { Opt_max, Opt_stats_mode, }; enum binderfs_stats_mode { binderfs_stats_mode_unset, binderfs_stats_mode_global, }; struct binder_features { bool oneway_spam_detection; bool extended_error; bool freeze_notification; }; static const struct constant_table binderfs_param_stats[] = { { "global", binderfs_stats_mode_global }, {} }; static const struct fs_parameter_spec binderfs_fs_parameters[] = { fsparam_u32("max", Opt_max), fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats), {} }; static struct binder_features binder_features = { .oneway_spam_detection = true, .extended_error = true, .freeze_notification = true, }; static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb) { return sb->s_fs_info; } bool is_binderfs_device(const struct inode *inode) { if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) return true; return false; } /** * binderfs_binder_device_create - allocate inode from super block of a * binderfs mount * @ref_inode: inode from which the super block will be taken * @userp: buffer to copy information about new device for userspace to * @req: struct binderfs_device as copied from userspace * * This function allocates a new binder_device and reserves a new minor * number for it. * Minor numbers are limited and tracked globally in binderfs_minors. The * function will stash a struct binder_device for the specific binder * device in i_private of the inode. * It will go on to allocate a new inode from the super block of the * filesystem mount, stash a struct binder_device in its i_private field * and attach a dentry to that inode. * * Return: 0 on success, negative errno on failure */ static int binderfs_binder_device_create(struct inode *ref_inode, struct binderfs_device __user *userp, struct binderfs_device *req) { int minor, ret; struct dentry *dentry, *root; struct binder_device *device; char *name = NULL; size_t name_len; struct inode *inode = NULL; struct super_block *sb = ref_inode->i_sb; struct binderfs_info *info = sb->s_fs_info; #if defined(CONFIG_IPC_NS) bool use_reserve = (info->ipc_ns == &init_ipc_ns); #else bool use_reserve = true; #endif /* Reserve new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); if (++info->device_count <= info->mount_opts.max) minor = ida_alloc_max(&binderfs_minors, use_reserve ? 
BINDERFS_MAX_MINOR : BINDERFS_MAX_MINOR_CAPPED, GFP_KERNEL); else minor = -ENOSPC; if (minor < 0) { --info->device_count; mutex_unlock(&binderfs_minors_mutex); return minor; } mutex_unlock(&binderfs_minors_mutex); ret = -ENOMEM; device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) goto err; inode = new_inode(sb); if (!inode) goto err; inode->i_ino = minor + INODE_OFFSET; simple_inode_init_ts(inode); init_special_inode(inode, S_IFCHR | 0600, MKDEV(MAJOR(binderfs_dev), minor)); inode->i_fop = &binder_fops; inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ name_len = strlen(req->name); /* Make sure to include terminating NUL byte */ name = kmemdup(req->name, name_len + 1, GFP_KERNEL); if (!name) goto err; refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; device->miscdev.name = name; device->miscdev.minor = minor; mutex_init(&device->context.context_mgr_node_lock); req->major = MAJOR(binderfs_dev); req->minor = minor; if (userp && copy_to_user(userp, req, sizeof(*req))) { ret = -EFAULT; goto err; } root = sb->s_root; inode_lock(d_inode(root)); /* look it up */ dentry = lookup_one_len(name, root, name_len); if (IS_ERR(dentry)) { inode_unlock(d_inode(root)); ret = PTR_ERR(dentry); goto err; } if (d_really_is_positive(dentry)) { /* already exists */ dput(dentry); inode_unlock(d_inode(root)); ret = -EEXIST; goto err; } inode->i_private = device; d_instantiate(dentry, inode); fsnotify_create(root->d_inode, dentry); inode_unlock(d_inode(root)); binder_add_device(device); return 0; err: kfree(name); kfree(device); mutex_lock(&binderfs_minors_mutex); --info->device_count; ida_free(&binderfs_minors, minor); mutex_unlock(&binderfs_minors_mutex); iput(inode); return ret; } /** * binder_ctl_ioctl - handle binder device node allocation requests * * The request handler for the binder-control device. All requests operate on * the binderfs mount the binder-control device resides in: * - BINDER_CTL_ADD * Allocate a new binder device. * * Return: %0 on success, negative errno on failure. 
*/ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -EINVAL; struct inode *inode = file_inode(file); struct binderfs_device __user *device = (struct binderfs_device __user *)arg; struct binderfs_device device_req; switch (cmd) { case BINDER_CTL_ADD: ret = copy_from_user(&device_req, device, sizeof(device_req)); if (ret) { ret = -EFAULT; break; } ret = binderfs_binder_device_create(inode, device, &device_req); break; default: break; } return ret; } static void binderfs_evict_inode(struct inode *inode) { struct binder_device *device = inode->i_private; struct binderfs_info *info = BINDERFS_SB(inode->i_sb); clear_inode(inode); if (!S_ISCHR(inode->i_mode) || !device) return; mutex_lock(&binderfs_minors_mutex); --info->device_count; ida_free(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); if (refcount_dec_and_test(&device->ref)) { hlist_del_init(&device->hlist); kfree(device->context.name); kfree(device); } } static int binderfs_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { int opt; struct binderfs_mount_opts *ctx = fc->fs_private; struct fs_parse_result result; opt = fs_parse(fc, binderfs_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_max: if (result.uint_32 > BINDERFS_MAX_MINOR) return invalfc(fc, "Bad value for '%s'", param->key); ctx->max = result.uint_32; break; case Opt_stats_mode: if (!capable(CAP_SYS_ADMIN)) return -EPERM; ctx->stats_mode = result.uint_32; break; default: return invalfc(fc, "Unsupported parameter '%s'", param->key); } return 0; } static int binderfs_fs_context_reconfigure(struct fs_context *fc) { struct binderfs_mount_opts *ctx = fc->fs_private; struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb); if (info->mount_opts.stats_mode != ctx->stats_mode) return invalfc(fc, "Binderfs stats mode cannot be changed during a remount"); info->mount_opts.stats_mode = ctx->stats_mode; info->mount_opts.max = ctx->max; return 0; } static int binderfs_show_options(struct seq_file *seq, struct dentry *root) { struct binderfs_info *info = BINDERFS_SB(root->d_sb); if (info->mount_opts.max <= BINDERFS_MAX_MINOR) seq_printf(seq, ",max=%d", info->mount_opts.max); switch (info->mount_opts.stats_mode) { case binderfs_stats_mode_unset: break; case binderfs_stats_mode_global: seq_printf(seq, ",stats=global"); break; } return 0; } static const struct super_operations binderfs_super_ops = { .evict_inode = binderfs_evict_inode, .show_options = binderfs_show_options, .statfs = simple_statfs, }; static inline bool is_binderfs_control_device(const struct dentry *dentry) { struct binderfs_info *info = dentry->d_sb->s_fs_info; return info->control_dentry == dentry; } static int binderfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (is_binderfs_control_device(old_dentry) || is_binderfs_control_device(new_dentry)) return -EPERM; return simple_rename(idmap, old_dir, old_dentry, new_dir, new_dentry, flags); } static int binderfs_unlink(struct inode *dir, struct dentry *dentry) { if (is_binderfs_control_device(dentry)) return -EPERM; return simple_unlink(dir, dentry); } static const struct file_operations binder_ctl_fops = { .owner = THIS_MODULE, .open = nonseekable_open, .unlocked_ioctl = binder_ctl_ioctl, .compat_ioctl = binder_ctl_ioctl, .llseek = noop_llseek, }; /** * binderfs_binder_ctl_create - create a new binder-control device * @sb: super block 
of the binderfs mount * * This function creates a new binder-control device node in the binderfs mount * referred to by @sb. * * Return: 0 on success, negative errno on failure */ static int binderfs_binder_ctl_create(struct super_block *sb) { int minor, ret; struct dentry *dentry; struct binder_device *device; struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; #if defined(CONFIG_IPC_NS) bool use_reserve = (info->ipc_ns == &init_ipc_ns); #else bool use_reserve = true; #endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return -ENOMEM; /* If we have already created a binder-control node, return. */ if (info->control_dentry) { ret = 0; goto out; } ret = -ENOMEM; inode = new_inode(sb); if (!inode) goto out; /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); minor = ida_alloc_max(&binderfs_minors, use_reserve ? BINDERFS_MAX_MINOR : BINDERFS_MAX_MINOR_CAPPED, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; goto out; } inode->i_ino = SECOND_INODE; simple_inode_init_ts(inode); init_special_inode(inode, S_IFCHR | 0600, MKDEV(MAJOR(binderfs_dev), minor)); inode->i_fop = &binder_ctl_fops; inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->miscdev.minor = minor; dentry = d_alloc_name(root, "binder-control"); if (!dentry) goto out; inode->i_private = device; info->control_dentry = dentry; d_add(dentry, inode); return 0; out: kfree(device); iput(inode); return ret; } static const struct inode_operations binderfs_dir_inode_operations = { .lookup = simple_lookup, .rename = binderfs_rename, .unlink = binderfs_unlink, }; static struct inode *binderfs_make_inode(struct super_block *sb, int mode) { struct inode *ret; ret = new_inode(sb); if (ret) { ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET); ret->i_mode = mode; simple_inode_init_ts(ret); } return ret; } static struct dentry *binderfs_create_dentry(struct dentry *parent, const char *name) { struct dentry *dentry; dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) return dentry; /* Return error if the file/dir already exists. 
*/ if (d_really_is_positive(dentry)) { dput(dentry); return ERR_PTR(-EEXIST); } return dentry; } void binderfs_remove_file(struct dentry *dentry) { struct inode *parent_inode; parent_inode = d_inode(dentry->d_parent); inode_lock(parent_inode); if (simple_positive(dentry)) { dget(dentry); simple_unlink(parent_inode, dentry); d_delete(dentry); dput(dentry); } inode_unlock(parent_inode); } struct dentry *binderfs_create_file(struct dentry *parent, const char *name, const struct file_operations *fops, void *data) { struct dentry *dentry; struct inode *new_inode, *parent_inode; struct super_block *sb; parent_inode = d_inode(parent); inode_lock(parent_inode); dentry = binderfs_create_dentry(parent, name); if (IS_ERR(dentry)) goto out; sb = parent_inode->i_sb; new_inode = binderfs_make_inode(sb, S_IFREG | 0444); if (!new_inode) { dput(dentry); dentry = ERR_PTR(-ENOMEM); goto out; } new_inode->i_fop = fops; new_inode->i_private = data; d_instantiate(dentry, new_inode); fsnotify_create(parent_inode, dentry); out: inode_unlock(parent_inode); return dentry; } static struct dentry *binderfs_create_dir(struct dentry *parent, const char *name) { struct dentry *dentry; struct inode *new_inode, *parent_inode; struct super_block *sb; parent_inode = d_inode(parent); inode_lock(parent_inode); dentry = binderfs_create_dentry(parent, name); if (IS_ERR(dentry)) goto out; sb = parent_inode->i_sb; new_inode = binderfs_make_inode(sb, S_IFDIR | 0755); if (!new_inode) { dput(dentry); dentry = ERR_PTR(-ENOMEM); goto out; } new_inode->i_fop = &simple_dir_operations; new_inode->i_op = &simple_dir_inode_operations; set_nlink(new_inode, 2); d_instantiate(dentry, new_inode); inc_nlink(parent_inode); fsnotify_mkdir(parent_inode, dentry); out: inode_unlock(parent_inode); return dentry; } static int binder_features_show(struct seq_file *m, void *unused) { bool *feature = m->private; seq_printf(m, "%d\n", *feature); return 0; } DEFINE_SHOW_ATTRIBUTE(binder_features); static int init_binder_features(struct super_block *sb) { struct dentry *dentry, *dir; dir = binderfs_create_dir(sb->s_root, "features"); if (IS_ERR(dir)) return PTR_ERR(dir); dentry = binderfs_create_file(dir, "oneway_spam_detection", &binder_features_fops, &binder_features.oneway_spam_detection); if (IS_ERR(dentry)) return PTR_ERR(dentry); dentry = binderfs_create_file(dir, "extended_error", &binder_features_fops, &binder_features.extended_error); if (IS_ERR(dentry)) return PTR_ERR(dentry); dentry = binderfs_create_file(dir, "freeze_notification", &binder_features_fops, &binder_features.freeze_notification); if (IS_ERR(dentry)) return PTR_ERR(dentry); return 0; } static int init_binder_logs(struct super_block *sb) { struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir; const struct binder_debugfs_entry *db_entry; struct binderfs_info *info; int ret = 0; binder_logs_root_dir = binderfs_create_dir(sb->s_root, "binder_logs"); if (IS_ERR(binder_logs_root_dir)) { ret = PTR_ERR(binder_logs_root_dir); goto out; } binder_for_each_debugfs_entry(db_entry) { dentry = binderfs_create_file(binder_logs_root_dir, db_entry->name, db_entry->fops, db_entry->data); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto out; } } proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc"); if (IS_ERR(proc_log_dir)) { ret = PTR_ERR(proc_log_dir); goto out; } info = sb->s_fs_info; info->proc_log_dir = proc_log_dir; out: return ret; } static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc) { int ret; struct binderfs_info *info; struct binderfs_mount_opts 
*ctx = fc->fs_private; struct inode *inode = NULL; struct binderfs_device device_info = {}; const char *name; size_t len; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; /* * The binderfs filesystem can be mounted by userns root in a * non-initial userns. By default such mounts have the SB_I_NODEV flag * set in s_iflags to prevent security issues where userns root can * just create random device nodes via mknod() since it owns the * filesystem mount. But binderfs does not allow to create any files * including devices nodes. The only way to create binder devices nodes * is through the binder-control device which userns root is explicitly * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both * necessary and safe. */ sb->s_iflags &= ~SB_I_NODEV; sb->s_iflags |= SB_I_NOEXEC; sb->s_magic = BINDERFS_SUPER_MAGIC; sb->s_op = &binderfs_super_ops; sb->s_time_gran = 1; sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); if (!sb->s_fs_info) return -ENOMEM; info = sb->s_fs_info; info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); info->root_gid = make_kgid(sb->s_user_ns, 0); if (!gid_valid(info->root_gid)) info->root_gid = GLOBAL_ROOT_GID; info->root_uid = make_kuid(sb->s_user_ns, 0); if (!uid_valid(info->root_uid)) info->root_uid = GLOBAL_ROOT_UID; info->mount_opts.max = ctx->max; info->mount_opts.stats_mode = ctx->stats_mode; inode = new_inode(sb); if (!inode) return -ENOMEM; inode->i_ino = FIRST_INODE; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | 0755; simple_inode_init_ts(inode); inode->i_op = &binderfs_dir_inode_operations; set_nlink(inode, 2); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; ret = binderfs_binder_ctl_create(sb); if (ret) return ret; name = binder_devices_param; for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) { strscpy(device_info.name, name, len + 1); ret = binderfs_binder_device_create(inode, NULL, &device_info); if (ret) return ret; name += len; if (*name == ',') name++; } ret = init_binder_features(sb); if (ret) return ret; if (info->mount_opts.stats_mode == binderfs_stats_mode_global) return init_binder_logs(sb); return 0; } static int binderfs_fs_context_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, binderfs_fill_super); } static void binderfs_fs_context_free(struct fs_context *fc) { struct binderfs_mount_opts *ctx = fc->fs_private; kfree(ctx); } static const struct fs_context_operations binderfs_fs_context_ops = { .free = binderfs_fs_context_free, .get_tree = binderfs_fs_context_get_tree, .parse_param = binderfs_fs_context_parse_param, .reconfigure = binderfs_fs_context_reconfigure, }; static int binderfs_init_fs_context(struct fs_context *fc) { struct binderfs_mount_opts *ctx; ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->max = BINDERFS_MAX_MINOR; ctx->stats_mode = binderfs_stats_mode_unset; fc->fs_private = ctx; fc->ops = &binderfs_fs_context_ops; return 0; } static void binderfs_kill_super(struct super_block *sb) { struct binderfs_info *info = sb->s_fs_info; /* * During inode eviction struct binderfs_info is needed. * So first wipe the super_block then free struct binderfs_info. 
*/ kill_litter_super(sb); if (info && info->ipc_ns) put_ipc_ns(info->ipc_ns); kfree(info); } static struct file_system_type binder_fs_type = { .name = "binder", .init_fs_context = binderfs_init_fs_context, .parameters = binderfs_fs_parameters, .kill_sb = binderfs_kill_super, .fs_flags = FS_USERNS_MOUNT, }; int __init init_binderfs(void) { int ret; const char *name; size_t len; /* Verify that the default binderfs device names are valid. */ name = binder_devices_param; for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) { if (len > BINDERFS_MAX_NAME) return -E2BIG; name += len; if (*name == ',') name++; } /* Allocate new major number for binderfs. */ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR, "binder"); if (ret) return ret; ret = register_filesystem(&binder_fs_type); if (ret) { unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); return ret; } return ret; } |
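/*
 * Userspace sketch of the BINDER_CTL_ADD request handled by
 * binder_ctl_ioctl() above: ask the binder-control node of a binderfs
 * mount to allocate a new binder device. The mount point used here is an
 * assumption; binderfs may be mounted anywhere.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>

int main(void)
{
	struct binderfs_device req;
	int fd, ret;

	fd = open("/dev/binderfs/binder-control", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open binder-control");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	strncpy(req.name, "my-binder", sizeof(req.name) - 1);

	ret = ioctl(fd, BINDER_CTL_ADD, &req);
	if (ret < 0)
		perror("BINDER_CTL_ADD");
	else
		printf("created %s with device number %u:%u\n",
		       req.name, req.major, req.minor);

	close(fd);
	return ret < 0;
}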
// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS block mapping.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "bmap.h"
#include "btree.h"
#include "direct.h"
#include "btnode.h"
#include "mdt.h"
#include "dat.h"
#include "alloc.h"

struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
{
	struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info;

	return nilfs->ns_dat;
}

static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
				    const char *fname, int err)
{
	struct inode *inode = bmap->b_inode;

	if (err == -EINVAL) {
		__nilfs_error(inode->i_sb, fname,
			      "broken bmap (inode number=%lu)", inode->i_ino);
		err = -EIO;
	}
	return err;
}

/**
 * nilfs_bmap_lookup_at_level - find a data block or node block
 * @bmap: bmap
 * @key: key
 * @level: level
 * @ptrp: place to store the value associated to @key
 *
 * Description: nilfs_bmap_lookup_at_level() finds a record whose key
 * matches @key in the block at @level of the bmap. The record associated
 * with @key is stored in the place pointed to by @ptrp.
* * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - A record associated with @key does not exist. * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, __u64 *ptrp) { sector_t blocknr; int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp); if (ret < 0) goto out; if (NILFS_BMAP_USE_VBN(bmap)) { ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp, &blocknr); if (!ret) *ptrp = blocknr; else if (ret == -ENOENT) { /* * If there was no valid entry in DAT for the block * address obtained by b_ops->bop_lookup, then pass * internal code -EINVAL to nilfs_bmap_convert_error * to treat it as metadata corruption. */ ret = -EINVAL; } } out: up_read(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, unsigned int maxblocks) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks); up_read(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) { __u64 keys[NILFS_BMAP_SMALL_HIGH + 1]; __u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1]; int ret, n; if (bmap->b_ops->bop_check_insert != NULL) { ret = bmap->b_ops->bop_check_insert(bmap, key); if (ret > 0) { n = bmap->b_ops->bop_gather_data( bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1); if (n < 0) return n; ret = nilfs_btree_convert_and_insert( bmap, key, ptr, keys, ptrs, n); if (ret == 0) bmap->b_u.u_flags |= NILFS_BMAP_LARGE; return ret; } else if (ret < 0) return ret; } return bmap->b_ops->bop_insert(bmap, key, ptr); } /** * nilfs_bmap_insert - insert a new key-record pair into a bmap * @bmap: bmap * @key: key * @rec: record * * Description: nilfs_bmap_insert() inserts the new key-record pair specified * by @key and @rec into @bmap. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EEXIST - A record associated with @key already exists. * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_insert(bmap, key, rec); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) { __u64 keys[NILFS_BMAP_LARGE_LOW + 1]; __u64 ptrs[NILFS_BMAP_LARGE_LOW + 1]; int ret, n; if (bmap->b_ops->bop_check_delete != NULL) { ret = bmap->b_ops->bop_check_delete(bmap, key); if (ret > 0) { n = bmap->b_ops->bop_gather_data( bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1); if (n < 0) return n; ret = nilfs_direct_delete_and_convert( bmap, key, keys, ptrs, n); if (ret == 0) bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE; return ret; } else if (ret < 0) return ret; } return bmap->b_ops->bop_delete(bmap, key); } /** * nilfs_bmap_seek_key - seek a valid entry and return its key * @bmap: bmap struct * @start: start key number * @keyp: place to store valid key * * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap * starting from @start, and stores it to @keyp if found. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No valid entry was found. 
* * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_seek_key(bmap, start, keyp); up_read(&bmap->b_sem); if (ret < 0) ret = nilfs_bmap_convert_error(bmap, __func__, ret); return ret; } int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_last_key(bmap, keyp); up_read(&bmap->b_sem); if (ret < 0) ret = nilfs_bmap_convert_error(bmap, __func__, ret); return ret; } /** * nilfs_bmap_delete - delete a key-record pair from a bmap * @bmap: bmap * @key: key * * Description: nilfs_bmap_delete() deletes the key-record pair specified by * @key from @bmap. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - A record associated with @key does not exist. * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_delete(bmap, key); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key) { __u64 lastkey; int ret; ret = bmap->b_ops->bop_last_key(bmap, &lastkey); if (ret < 0) { if (ret == -ENOENT) ret = 0; return ret; } while (key <= lastkey) { ret = nilfs_bmap_do_delete(bmap, lastkey); if (ret < 0) return ret; ret = bmap->b_ops->bop_last_key(bmap, &lastkey); if (ret < 0) { if (ret == -ENOENT) ret = 0; return ret; } } return 0; } /** * nilfs_bmap_truncate - truncate a bmap to a specified key * @bmap: bmap * @key: key * * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are * greater than or equal to @key from @bmap. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_truncate(bmap, key); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_clear - free resources a bmap holds * @bmap: bmap * * Description: nilfs_bmap_clear() frees resources associated with @bmap. */ void nilfs_bmap_clear(struct nilfs_bmap *bmap) { down_write(&bmap->b_sem); if (bmap->b_ops->bop_clear != NULL) bmap->b_ops->bop_clear(bmap); up_write(&bmap->b_sem); } /** * nilfs_bmap_propagate - propagate dirty state * @bmap: bmap * @bh: buffer head * * Description: nilfs_bmap_propagate() marks the buffers that directly or * indirectly refer to the block specified by @bh dirty. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. 
*/ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { int ret; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_propagate(bmap, bh); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_lookup_dirty_buffers - collect dirty block buffers * @bmap: bmap * @listp: pointer to buffer head list */ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, struct list_head *listp) { if (bmap->b_ops->bop_lookup_dirty_buffers != NULL) bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp); } /** * nilfs_bmap_assign - assign a new block number to a block * @bmap: bmap * @bh: place to store a pointer to the buffer head to which a block * address is assigned (in/out) * @blocknr: block number * @binfo: block information * * Description: nilfs_bmap_assign() assigns the block number @blocknr to the * buffer specified by @bh. The block information is stored in the memory * pointed to by @binfo, and the buffer head may be replaced as a block * address is assigned, in which case a pointer to the new buffer head is * stored in the memory pointed to by @bh. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, unsigned long blocknr, union nilfs_binfo *binfo) { int ret; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_mark - mark block dirty * @bmap: bmap * @key: key * @level: level * * Description: nilfs_bmap_mark() marks the block specified by @key and @level * as dirty. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) { int ret; if (bmap->b_ops->bop_mark == NULL) return 0; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_mark(bmap, key, level); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_test_and_clear_dirty - test and clear a bmap dirty state * @bmap: bmap * * Description: nilfs_test_and_clear() is the atomic operation to test and * clear the dirty state of @bmap. * * Return: 1 if @bmap is dirty, or 0 if clear. 
*/ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_dirty(bmap); nilfs_bmap_clear_dirty(bmap); up_write(&bmap->b_sem); return ret; } /* * Internal use only */ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, const struct buffer_head *bh) { loff_t pos = folio_pos(bh->b_folio) + bh_offset(bh); return pos >> bmap->b_inode->i_blkbits; } __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key) { __s64 diff; diff = key - bmap->b_last_allocated_key; if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) && (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) && (bmap->b_last_allocated_ptr + diff > 0)) return bmap->b_last_allocated_ptr + diff; else return NILFS_BMAP_INVALID_PTR; } #define NILFS_BMAP_GROUP_DIV 8 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap) { struct inode *dat = nilfs_bmap_get_dat(bmap); unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat); unsigned long group = bmap->b_inode->i_ino / entries_per_group; return group * entries_per_group + (bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) * (entries_per_group / NILFS_BMAP_GROUP_DIV); } static struct lock_class_key nilfs_bmap_dat_lock_key; static struct lock_class_key nilfs_bmap_mdt_lock_key; /** * nilfs_bmap_read - read a bmap from an inode * @bmap: bmap * @raw_inode: on-disk inode * * Description: nilfs_bmap_read() initializes the bmap @bmap. * * Return: 0 on success, or one of the following negative error codes on * failure: * * %-EIO - I/O error (corrupted bmap). * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { if (raw_inode == NULL) memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE); else memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE); init_rwsem(&bmap->b_sem); bmap->b_state = 0; bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; switch (bmap->b_inode->i_ino) { case NILFS_DAT_INO: bmap->b_ptr_type = NILFS_BMAP_PTR_P; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); break; case NILFS_CPFILE_INO: case NILFS_SUFILE_INO: bmap->b_ptr_type = NILFS_BMAP_PTR_VS; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); break; case NILFS_IFILE_INO: lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); fallthrough; default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; break; } return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ? nilfs_btree_init(bmap) : nilfs_direct_init(bmap); } /** * nilfs_bmap_write - write back a bmap to an inode * @bmap: bmap * @raw_inode: on-disk inode * * Description: nilfs_bmap_write() stores @bmap in @raw_inode. 
*/ void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { memcpy(raw_inode->i_bmap, bmap->b_u.u_data, NILFS_INODE_BMAP_SIZE * sizeof(__le64)); if (bmap->b_inode->i_ino == NILFS_DAT_INO) bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; } void nilfs_bmap_init_gc(struct nilfs_bmap *bmap) { memset(&bmap->b_u, 0, NILFS_BMAP_SIZE); init_rwsem(&bmap->b_sem); bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; bmap->b_ptr_type = NILFS_BMAP_PTR_U; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; bmap->b_state = 0; nilfs_btree_init_gc(bmap); } void nilfs_bmap_save(const struct nilfs_bmap *bmap, struct nilfs_bmap_store *store) { memcpy(store->data, bmap->b_u.u_data, sizeof(store->data)); store->last_allocated_key = bmap->b_last_allocated_key; store->last_allocated_ptr = bmap->b_last_allocated_ptr; store->state = bmap->b_state; } void nilfs_bmap_restore(struct nilfs_bmap *bmap, const struct nilfs_bmap_store *store) { memcpy(bmap->b_u.u_data, store->data, sizeof(store->data)); bmap->b_last_allocated_key = store->last_allocated_key; bmap->b_last_allocated_ptr = store->last_allocated_ptr; bmap->b_state = store->state; } |
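/*
 * Hedged sketch of a typical caller of nilfs_bmap_lookup_at_level() above:
 * map a file-relative block number to an on-disk block number. This is an
 * illustrative fragment assuming the nilfs2 internal headers; the helper
 * name is invented. Level 1 is the value used by the nilfs_bmap_lookup()
 * wrapper in bmap.h for data-block lookups.
 */
static int demo_read_block(struct nilfs_bmap *bmap, __u64 blkoff,
			   sector_t *pbn)
{
	__u64 ptr;
	int err;

	err = nilfs_bmap_lookup_at_level(bmap, blkoff, 1, &ptr);
	if (err)
		return err;	/* -ENOENT: hole; -EIO/-ENOMEM: hard error */

	/* For virtual bmaps the pointer is already DAT-translated. */
	*pbn = ptr;
	return 0;
}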
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB HID driver for Glorious PC Gaming Race
 * Glorious Model O, O- and D mice.
 *
 * Copyright (c) 2020 Samuel Čavoj <sammko@sammserver.com>
 */

/*
 */

#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

MODULE_AUTHOR("Samuel Čavoj <sammko@sammserver.com>");
MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");

/*
 * Glorious Model O and O- specify the const flag in the consumer input
 * report descriptor, which leads to inputs being ignored. Fix this
 * by patching the descriptor.
 *
 * Glorious Model I incorrectly specifies the Usage Minimum for its
 * keyboard HID report, causing keycodes to be misinterpreted.
 * Fix this by setting Usage Minimum to 0 in that report.
 */
static const __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					 unsigned int *rsize)
{
	if (*rsize == 213 &&
	    rdesc[84] == 129 && rdesc[112] == 129 && rdesc[140] == 129 &&
	    rdesc[85] == 3 && rdesc[113] == 3 && rdesc[141] == 3) {
		hid_info(hdev,
			 "patching Glorious Model O consumer control report descriptor\n");
		rdesc[85] = rdesc[113] = rdesc[141] =
			HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
	}
	if (*rsize == 156 && rdesc[41] == 1) {
		hid_info(hdev,
			 "patching Glorious Model I keyboard report descriptor\n");
		rdesc[41] = 0;
	}
	return rdesc;
}

static void glorious_update_name(struct hid_device *hdev)
{
	const char *model = "Device";

	switch (hdev->product) {
	case USB_DEVICE_ID_GLORIOUS_MODEL_O:
		model = "Model O";
		break;
	case USB_DEVICE_ID_GLORIOUS_MODEL_D:
		model = "Model D";
		break;
	case USB_DEVICE_ID_GLORIOUS_MODEL_I:
		model = "Model I";
		break;
	}

	snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
}

static int glorious_probe(struct hid_device *hdev,
			  const struct hid_device_id *id)
{
	int ret;

	hdev->quirks |= HID_QUIRK_INPUT_PER_APP;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	glorious_update_name(hdev);

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static const struct hid_device_id glorious_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
			 USB_DEVICE_ID_GLORIOUS_MODEL_O) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
			 USB_DEVICE_ID_GLORIOUS_MODEL_D) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW,
			 USB_DEVICE_ID_GLORIOUS_MODEL_I) },
	{ }
};
MODULE_DEVICE_TABLE(hid, glorious_devices);

static struct hid_driver glorious_driver = {
	.name = "glorious",
	.id_table = glorious_devices,
	.probe = glorious_probe,
	.report_fixup = glorious_report_fixup
};

module_hid_driver(glorious_driver);

MODULE_LICENSE("GPL");
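/*
 * Illustrative decoding of the bytes patched by glorious_report_fixup()
 * above: 0x81 (129) is the "Input" main item prefix with a one-byte
 * payload, and a payload of 0x03 means Constant|Variable, which makes the
 * field invisible to the input layer. Rewriting the payload to
 * Variable|Relative (0x06) turns it back into a usable input field.
 * Standalone userspace sketch; the macro names are redefined locally with
 * the same values the kernel uses.
 */
#include <stdio.h>

#define DEMO_ITEM_INPUT			0x81	/* Input main item, 1-byte data */
#define DEMO_MAIN_ITEM_CONSTANT		0x01
#define DEMO_MAIN_ITEM_VARIABLE		0x02
#define DEMO_MAIN_ITEM_RELATIVE		0x04

int main(void)
{
	unsigned char rdesc[2] = {
		DEMO_ITEM_INPUT,
		DEMO_MAIN_ITEM_CONSTANT | DEMO_MAIN_ITEM_VARIABLE,
	};

	printf("before: Input(0x%02x)\n", rdesc[1]);	/* 0x03: ignored field */
	rdesc[1] = DEMO_MAIN_ITEM_VARIABLE | DEMO_MAIN_ITEM_RELATIVE;
	printf("after:  Input(0x%02x)\n", rdesc[1]);	/* 0x06: usable field */
	return 0;
}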
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/eventfd.h
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 *
 */

#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H

#include <linux/wait.h>
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <uapi/linux/eventfd.h>

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from eventfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)

struct eventfd_ctx;
struct file;

#ifdef CONFIG_EVENTFD

void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
				  wait_queue_entry_t *wait, __u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);

static inline bool eventfd_signal_allowed(void)
{
	return !current->in_eventfd;
}

#else /* CONFIG_EVENTFD */

/*
 * Ugly ugly ugly error layer to support modules that use eventfd but
 * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
 */

static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	return ERR_PTR(-ENOSYS);
}

static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
}

static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
}

static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
						wait_queue_entry_t *wait,
						__u64 *cnt)
{
	return -ENOSYS;
}

static inline bool eventfd_signal_allowed(void)
{
	return true;
}

static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
}

#endif

static inline void eventfd_signal(struct eventfd_ctx *ctx)
{
	eventfd_signal_mask(ctx, 0);
}

#endif /* _LINUX_EVENTFD_H */
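/*
 * Hedged sketch of the usual in-kernel consumer pattern for the header
 * above: userspace hands a driver an eventfd file descriptor, the driver
 * pins the context with eventfd_ctx_fdget(), and later kicks the counter
 * with eventfd_signal() from its completion path. The demo_* names are
 * illustrative; only the eventfd calls come from this header.
 */
#include <linux/eventfd.h>
#include <linux/err.h>

struct demo_dev {
	struct eventfd_ctx *done_evt;
};

static int demo_set_eventfd(struct demo_dev *dev, int fd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	dev->done_evt = ctx;
	return 0;
}

static void demo_complete(struct demo_dev *dev)
{
	if (dev->done_evt)
		eventfd_signal(dev->done_evt);	/* adds 1 to the counter */
}

static void demo_teardown(struct demo_dev *dev)
{
	if (dev->done_evt) {
		eventfd_ctx_put(dev->done_evt);
		dev->done_evt = NULL;
	}
}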
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __BMAP_DOT_H__
#define __BMAP_DOT_H__

#include <linux/iomap.h>

#include "inode.h"

struct inode;
struct gfs2_inode;
struct page;

/**
 * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
 * @ip: the file
 * @len: the number of bytes to be written to the file
 * @data_blocks: returns the number of data blocks required
 * @ind_blocks: returns the number of indirect blocks required
 *
 */
static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
					  unsigned int len,
					  unsigned int *data_blocks,
					  unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp;

	BUG_ON(gfs2_is_dir(ip));

	*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
	*ind_blocks = 3 * (sdp->sd_max_height - 1);

	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		*ind_blocks += tmp;
	}
}

extern const struct iomap_ops gfs2_iomap_ops;
extern const struct iomap_writeback_ops gfs2_writeback_ops;

int gfs2_unstuff_dinode(struct gfs2_inode *ip);
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh, int create);
int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
		   struct iomap *iomap);
int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
		     struct iomap *iomap);
int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
		    unsigned int *extlen);
int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
		      unsigned *extlen, bool *new);
int gfs2_setattr_size(struct inode *inode, u64 size);
int gfs2_truncatei_resume(struct gfs2_inode *ip);
int gfs2_file_dealloc(struct gfs2_inode *ip);
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len);
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);

#endif /* __BMAP_DOT_H__ */
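/*
 * Hedged sketch of how a write path might use gfs2_write_calc_reserv()
 * above: turn a byte count into the data/indirect block reservation that
 * then feeds the transaction size. The quota and transaction calls of a
 * real caller are omitted; the helper name is illustrative.
 */
static unsigned int demo_write_reservation(struct gfs2_inode *ip,
					   unsigned int len)
{
	unsigned int data_blocks, ind_blocks;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	/* A real caller adds journal/metadata overhead before gfs2_trans_begin(). */
	return data_blocks + ind_blocks;
}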
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIN_HEAP_H
#define _LINUX_MIN_HEAP_H

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * The Min Heap API provides utilities for managing min-heaps, a binary tree
 * structure where each node's value is less than or equal to its children's
 * values, ensuring the smallest element is at the root.
 *
 * Users should avoid directly calling functions prefixed with __min_heap_*().
 * Instead, use the provided macro wrappers.
 *
 * For further details and examples, refer to Documentation/core-api/min_heap.rst.
 */

/**
 * Data structure to hold a min-heap.
 * @nr: Number of elements currently in the heap.
 * @size: Maximum number of elements that can be held in current storage.
 * @data: Pointer to the start of array holding the heap elements.
 * @preallocated: Start of the static preallocated array holding the heap elements.
 */
#define MIN_HEAP_PREALLOCATED(_type, _name, _nr)	\
struct _name {						\
	size_t nr;					\
	size_t size;					\
	_type *data;					\
	_type preallocated[_nr];			\
}

#define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0)

typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char;

#define __minheap_cast(_heap)		(typeof((_heap)->data[0]) *)
#define __minheap_obj_size(_heap)	sizeof((_heap)->data[0])

/**
 * struct min_heap_callbacks - Data/functions to customise the min_heap.
 * @less: Partial order function for this heap.
 * @swp: Swap elements function.
 */
struct min_heap_callbacks {
	bool (*less)(const void *lhs, const void *rhs, void *args);
	void (*swp)(void *lhs, void *rhs, void *args);
};

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
* @base: pointer to data * @size: size of each element * @align: required alignment (typically 4 or 8) * * Returns true if elements can be copied using word loads and stores. * The size must be a multiple of the alignment, and the base address must * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. * * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)" * to "if ((a | b) & mask)", so we do that by hand. */ __attribute_const__ __always_inline static bool is_aligned(const void *base, size_t size, unsigned char align) { unsigned char lsbits = (unsigned char)size; (void)base; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS lsbits |= (unsigned char)(uintptr_t)base; #endif return (lsbits & (align - 1)) == 0; } /** * swap_words_32 - swap two elements in 32-bit chunks * @a: pointer to the first element to swap * @b: pointer to the second element to swap * @n: element size (must be a multiple of 4) * * Exchange the two objects in memory. This exploits base+index addressing, * which basically all CPUs have, to minimize loop overhead computations. * * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the * bottom of the loop, even though the zero flag is still valid from the * subtract (since the intervening mov instructions don't alter the flags). * Gcc 8.1.0 doesn't have that problem. */ static __always_inline void swap_words_32(void *a, void *b, size_t n) { do { u32 t = *(u32 *)(a + (n -= 4)); *(u32 *)(a + n) = *(u32 *)(b + n); *(u32 *)(b + n) = t; } while (n); } /** * swap_words_64 - swap two elements in 64-bit chunks * @a: pointer to the first element to swap * @b: pointer to the second element to swap * @n: element size (must be a multiple of 8) * * Exchange the two objects in memory. Th |