/src/u-boot/drivers/virtio/virtio_ring.c
Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0+ |
2 | | /* |
3 | | * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi> |
4 | | * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com> |
5 | | * |
6 | | * virtio ring implementation |
7 | | */ |
8 | | |
9 | | #include <bouncebuf.h> |
10 | | #include <dm.h> |
11 | | #include <log.h> |
12 | | #include <malloc.h> |
13 | | #include <virtio_types.h> |
14 | | #include <virtio.h> |
15 | | #include <virtio_ring.h> |
16 | | #include <linux/bug.h> |
17 | | #include <linux/compat.h> |
18 | | #include <linux/kernel.h> |
19 | | |
20 | | static void *virtio_alloc_pages(struct udevice *vdev, u32 npages) |
21 | 0 | { |
22 | 0 | return memalign(PAGE_SIZE, npages * PAGE_SIZE); |
23 | 0 | } |
24 | | |
25 | | static void virtio_free_pages(struct udevice *vdev, void *ptr, u32 npages) |
26 | 0 | { |
27 | 0 | free(ptr); |
28 | 0 | } |
29 | | |
30 | | static int __bb_force_page_align(struct bounce_buffer *state) |
31 | 0 | { |
32 | 0 | const ulong align_mask = PAGE_SIZE - 1; |
33 | 0 |
|
34 | 0 | if ((ulong)state->user_buffer & align_mask) |
35 | 0 | return 0; |
36 | 0 |
|
37 | 0 | if (state->len != state->len_aligned) |
38 | 0 | return 0; |
39 | 0 |
|
40 | 0 | return 1; |
41 | 0 | } |
42 | | |
/*
 * virtqueue_attach_desc() - fill descriptor slot @i from one sg entry
 *
 * @vq:    virtqueue owning the descriptor table
 * @i:     index of the descriptor slot to fill
 * @sg:    scatter-gather entry supplying buffer address and length
 * @flags: VRING_DESC_F_* flags to set on the descriptor
 *
 * Writes the shadow descriptor first, then mirrors it (byte-swapped as
 * needed) into the descriptor visible to the device.
 *
 * Return: index of the next descriptor in the pre-linked free chain, taken
 * from the shadow descriptor's 'next' field.
 */
static unsigned int virtqueue_attach_desc(struct virtqueue *vq, unsigned int i,
					  struct virtio_sg *sg, u16 flags)
{
	struct vring_desc_shadow *desc_shadow = &vq->vring_desc_shadow[i];
	struct vring_desc *desc = &vq->vring.desc[i];
	void *addr;

	if (IS_ENABLED(CONFIG_BOUNCE_BUFFER) && vq->vring.bouncebufs) {
		struct bounce_buffer *bb = &vq->vring.bouncebufs[i];
		unsigned int bbflags;
		int ret;

		/* Transfer direction decides which way data is bounced */
		if (flags & VRING_DESC_F_WRITE)
			bbflags = GEN_BB_WRITE;
		else
			bbflags = GEN_BB_READ;

		ret = bounce_buffer_start_extalign(bb, sg->addr, sg->length,
						   bbflags, PAGE_SIZE,
						   __bb_force_page_align);
		if (ret) {
			/*
			 * NOTE(review): on failure, bb->bounce_buffer is
			 * still used below — presumably stale or NULL.
			 * Confirm this path is unreachable in practice, or
			 * consider propagating the error to the caller.
			 */
			debug("%s: failed to allocate bounce buffer (length 0x%zx)\n",
			      vq->vdev->name, sg->length);
		}

		addr = bb->bounce_buffer;
	} else {
		addr = sg->addr;
	}

	/* Update the shadow descriptor. */
	desc_shadow->addr = (u64)(uintptr_t)addr;
	desc_shadow->len = sg->length;
	desc_shadow->flags = flags;

	/* Update the shared descriptor to match the shadow. */
	desc->addr = cpu_to_virtio64(vq->vdev, desc_shadow->addr);
	desc->len = cpu_to_virtio32(vq->vdev, desc_shadow->len);
	desc->flags = cpu_to_virtio16(vq->vdev, desc_shadow->flags);
	desc->next = cpu_to_virtio16(vq->vdev, desc_shadow->next);

	return desc_shadow->next;
}
86 | | |
87 | | static void virtqueue_detach_desc(struct virtqueue *vq, unsigned int idx) |
88 | 0 | { |
89 | 0 | struct vring_desc *desc = &vq->vring.desc[idx]; |
90 | 0 | struct bounce_buffer *bb; |
91 | |
|
92 | 0 | if (!IS_ENABLED(CONFIG_BOUNCE_BUFFER) || !vq->vring.bouncebufs) |
93 | 0 | return; |
94 | | |
95 | 0 | bb = &vq->vring.bouncebufs[idx]; |
96 | 0 | bounce_buffer_stop(bb); |
97 | 0 | desc->addr = cpu_to_virtio64(vq->vdev, (u64)(uintptr_t)bb->user_buffer); |
98 | 0 | } |
99 | | |
/*
 * virtqueue_add() - expose a chain of buffers to the device
 *
 * @vq:      virtqueue to add the buffers to
 * @sgs:     array of out_sgs + in_sgs scatter-gather entries; the first
 *           out_sgs entries are device-readable, the remainder writable
 * @out_sgs: number of device-readable entries
 * @in_sgs:  number of device-writable entries
 *
 * Builds a descriptor chain from the free list, publishes its head in the
 * available ring, and bumps avail->idx behind a write barrier.
 *
 * Return: 0 on success, -ENOSPC when too few free descriptors remain.
 */
int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs)
{
	struct vring_desc *desc;
	unsigned int descs_used = out_sgs + in_sgs;
	unsigned int i, n, avail, uninitialized_var(prev);
	int head;

	WARN_ON(descs_used == 0);

	head = vq->free_head;

	desc = vq->vring.desc;
	i = head;

	if (vq->num_free < descs_used) {
		debug("Can't add buf len %i - avail = %i\n",
		      descs_used, vq->num_free);
		/*
		 * FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP.
		 */
		if (out_sgs)
			virtio_notify(vq->vdev, vq);
		return -ENOSPC;
	}

	/* Link one descriptor per sg entry, following the free chain */
	for (n = 0; n < descs_used; n++) {
		u16 flags = VRING_DESC_F_NEXT;

		/* Entries after the out_sgs prefix are device-writable */
		if (n >= out_sgs)
			flags |= VRING_DESC_F_WRITE;
		prev = i;
		i = virtqueue_attach_desc(vq, i, sgs[n], flags);
	}
	/* Last one doesn't continue */
	vq->vring_desc_shadow[prev].flags &= ~VRING_DESC_F_NEXT;
	desc[prev].flags = cpu_to_virtio16(vq->vdev, vq->vring_desc_shadow[prev].flags);

	/* We're using some buffers from the free list. */
	vq->num_free -= descs_used;

	/* Update free pointer */
	vq->free_head = i;

	/* Mark the descriptor as the head of a chain. */
	vq->vring_desc_shadow[head].chain_head = true;

	/*
	 * Put entry in available array (but don't update avail->idx
	 * until they do sync).
	 */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);

	/*
	 * Descriptors and available array need to be set before we expose the
	 * new available array entries.
	 */
	virtio_wmb();
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	/*
	 * This is very unlikely, but theoretically possible.
	 * Kick just in case.
	 */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(vq);

	return 0;
}
174 | | |
/*
 * virtqueue_kick_prepare() - decide whether the device must be notified
 *
 * With VIRTIO_RING_F_EVENT_IDX negotiated (vq->event), the device publishes
 * via vring_avail_event() the avail index at which it wants a kick.
 * Otherwise the device suppresses kicks by setting VRING_USED_F_NO_NOTIFY
 * in used->flags.
 *
 * Return: true if virtio_notify() should be called for @vq.
 */
static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
	u16 new, old;
	bool needs_kick;

	/*
	 * We need to expose available array entries before checking
	 * avail event.
	 */
	virtio_mb();

	/* [old, new) is the window of entries added since the last kick */
	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
				vring_avail_event(&vq->vring)), new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
				VRING_USED_F_NO_NOTIFY));
	}

	return needs_kick;
}
200 | | |
201 | | void virtqueue_kick(struct virtqueue *vq) |
202 | 0 | { |
203 | 0 | if (virtqueue_kick_prepare(vq)) |
204 | 0 | virtio_notify(vq->vdev, vq); |
205 | 0 | } |
206 | | |
207 | | static void detach_buf(struct virtqueue *vq, unsigned int head) |
208 | 0 | { |
209 | 0 | unsigned int i; |
210 | | |
211 | | /* Unmark the descriptor as the head of a chain. */ |
212 | 0 | vq->vring_desc_shadow[head].chain_head = false; |
213 | | |
214 | | /* Put back on free list: unmap first-level descriptors and find end */ |
215 | 0 | i = head; |
216 | |
|
217 | 0 | while (vq->vring_desc_shadow[i].flags & VRING_DESC_F_NEXT) { |
218 | 0 | virtqueue_detach_desc(vq, i); |
219 | 0 | i = vq->vring_desc_shadow[i].next; |
220 | 0 | vq->num_free++; |
221 | 0 | } |
222 | |
|
223 | 0 | virtqueue_detach_desc(vq, i); |
224 | 0 | vq->vring_desc_shadow[i].next = vq->free_head; |
225 | 0 | vq->free_head = head; |
226 | | |
227 | | /* Plus final descriptor */ |
228 | 0 | vq->num_free++; |
229 | 0 | } |
230 | | |
231 | | static inline bool more_used(const struct virtqueue *vq) |
232 | 0 | { |
233 | 0 | return vq->last_used_idx != virtio16_to_cpu(vq->vdev, |
234 | 0 | vq->vring.used->idx); |
235 | 0 | } |
236 | | |
/*
 * virtqueue_get_buf() - fetch the next completed buffer from the device
 *
 * @vq:  virtqueue to poll
 * @len: optional out parameter; set to the byte count the device reported
 *       in the used ring entry
 *
 * Validates the used entry, returns the chain's descriptors to the free
 * list, and advances last_used_idx.
 *
 * Return: the buffer address recorded in the head shadow descriptor, or
 * NULL when nothing is pending or the used entry is invalid.
 */
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
	unsigned int i;
	u16 last_used;

	if (!more_used(vq)) {
		debug("(%s.%d): No more buffers in queue\n",
		      vq->vdev->name, vq->index);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host */
	virtio_rmb();

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
	if (len) {
		*len = virtio32_to_cpu(vq->vdev,
				       vq->vring.used->ring[last_used].len);
		debug("(%s.%d): last used idx %u with len %u\n",
		      vq->vdev->name, vq->index, i, *len);
	}

	/* Reject ids pointing outside the descriptor table */
	if (unlikely(i >= vq->vring.num)) {
		printf("(%s.%d): id %u out of range\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	/* Only chain heads may legitimately appear in the used ring */
	if (unlikely(!vq->vring_desc_shadow[i].chain_head)) {
		printf("(%s.%d): id %u is not a head\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	detach_buf(vq, i);
	vq->last_used_idx++;
	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(&vring_used_event(&vq->vring),
				cpu_to_virtio16(vq->vdev, vq->last_used_idx));

	return (void *)(uintptr_t)vq->vring_desc_shadow[i].addr;
}
285 | | |
286 | | static struct virtqueue *__vring_new_virtqueue(unsigned int index, |
287 | | struct vring vring, |
288 | | struct udevice *udev) |
289 | 0 | { |
290 | 0 | unsigned int i; |
291 | 0 | struct virtqueue *vq; |
292 | 0 | struct vring_desc_shadow *vring_desc_shadow; |
293 | 0 | struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev); |
294 | 0 | struct udevice *vdev = uc_priv->vdev; |
295 | |
|
296 | 0 | vq = malloc(sizeof(*vq)); |
297 | 0 | if (!vq) |
298 | 0 | return NULL; |
299 | | |
300 | 0 | vring_desc_shadow = calloc(vring.num, sizeof(struct vring_desc_shadow)); |
301 | 0 | if (!vring_desc_shadow) { |
302 | 0 | free(vq); |
303 | 0 | return NULL; |
304 | 0 | } |
305 | | |
306 | 0 | vq->vdev = vdev; |
307 | 0 | vq->index = index; |
308 | 0 | vq->num_free = vring.num; |
309 | 0 | vq->vring = vring; |
310 | 0 | vq->vring_desc_shadow = vring_desc_shadow; |
311 | 0 | vq->last_used_idx = 0; |
312 | 0 | vq->avail_flags_shadow = 0; |
313 | 0 | vq->avail_idx_shadow = 0; |
314 | 0 | vq->num_added = 0; |
315 | 0 | list_add_tail(&vq->list, &uc_priv->vqs); |
316 | |
|
317 | 0 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); |
318 | | |
319 | | /* Tell other side not to bother us */ |
320 | 0 | vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; |
321 | 0 | if (!vq->event) |
322 | 0 | vq->vring.avail->flags = cpu_to_virtio16(vdev, |
323 | 0 | vq->avail_flags_shadow); |
324 | | |
325 | | /* Put everything in free lists */ |
326 | 0 | vq->free_head = 0; |
327 | 0 | for (i = 0; i < vring.num - 1; i++) |
328 | 0 | vq->vring_desc_shadow[i].next = i + 1; |
329 | |
|
330 | 0 | return vq; |
331 | 0 | } |
332 | | |
/*
 * vring_create_virtqueue() - allocate ring memory and create a virtqueue
 *
 * @index:       queue index on the device
 * @num:         requested number of descriptors (must be a power of 2);
 *               halved repeatedly if a large enough allocation fails
 * @vring_align: alignment required by the transport for the used ring
 * @udev:        the transport (bus) device
 *
 * When VIRTIO_F_IOMMU_PLATFORM is negotiated, a bounce-buffer state array
 * is also allocated so sg buffers can be bounced to page-aligned DMA memory.
 *
 * Return: the new virtqueue, or NULL on failure.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev)
{
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct udevice *vdev = uc_priv->vdev;
	struct virtqueue *vq;
	void *queue = NULL;
	struct bounce_buffer *bbs = NULL;
	struct vring vring;

	/* We assume num is a power of 2 */
	if (num & (num - 1)) {
		printf("Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		vring.size = vring_size(num, vring_align);

		queue = virtio_alloc_pages(vdev,
					   DIV_ROUND_UP(vring.size, PAGE_SIZE));
		if (queue)
			break;
	}

	/* num halved all the way to zero without a successful allocation */
	if (!num)
		return NULL;

	/* Ring fits in one page (loop not entered) — allocate just that */
	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = virtio_alloc_pages(vdev, 1);
		vring.size = PAGE_SIZE;
	}
	if (!queue)
		return NULL;

	memset(queue, 0, vring_size(num, vring_align));

	/* With an IOMMU, device I/O goes through per-descriptor bounce bufs */
	if (virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
		bbs = calloc(num, sizeof(*bbs));
		if (!bbs)
			goto err_free_queue;
	}

	vring_init(&vring, num, queue, vring_align, bbs);

	vq = __vring_new_virtqueue(index, vring, udev);
	if (!vq)
		goto err_free_bbs;

	debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
	      queue, vq, num);

	return vq;

err_free_bbs:
	free(bbs);
err_free_queue:
	virtio_free_pages(vdev, queue, DIV_ROUND_UP(vring.size, PAGE_SIZE));
	return NULL;
}
396 | | |
397 | | void vring_del_virtqueue(struct virtqueue *vq) |
398 | 0 | { |
399 | 0 | virtio_free_pages(vq->vdev, vq->vring.desc, |
400 | 0 | DIV_ROUND_UP(vq->vring.size, PAGE_SIZE)); |
401 | 0 | free(vq->vring_desc_shadow); |
402 | 0 | list_del(&vq->list); |
403 | 0 | free(vq->vring.bouncebufs); |
404 | 0 | free(vq); |
405 | 0 | } |
406 | | |
407 | | unsigned int virtqueue_get_vring_size(struct virtqueue *vq) |
408 | 0 | { |
409 | 0 | return vq->vring.num; |
410 | 0 | } |
411 | | |
412 | | ulong virtqueue_get_desc_addr(struct virtqueue *vq) |
413 | 0 | { |
414 | 0 | return (ulong)vq->vring.desc; |
415 | 0 | } |
416 | | |
417 | | ulong virtqueue_get_avail_addr(struct virtqueue *vq) |
418 | 0 | { |
419 | 0 | return (ulong)vq->vring.desc + |
420 | 0 | ((char *)vq->vring.avail - (char *)vq->vring.desc); |
421 | 0 | } |
422 | | |
423 | | ulong virtqueue_get_used_addr(struct virtqueue *vq) |
424 | 0 | { |
425 | 0 | return (ulong)vq->vring.desc + |
426 | 0 | ((char *)vq->vring.used - (char *)vq->vring.desc); |
427 | 0 | } |
428 | | |
429 | | bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx) |
430 | 0 | { |
431 | 0 | virtio_mb(); |
432 | |
|
433 | 0 | return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx); |
434 | 0 | } |
435 | | |
436 | | void virtqueue_dump(struct virtqueue *vq) |
437 | 0 | { |
438 | 0 | unsigned int i; |
439 | |
|
440 | 0 | printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name); |
441 | 0 | printf("\tindex %u, phys addr %p num %u\n", |
442 | 0 | vq->index, vq->vring.desc, vq->vring.num); |
443 | 0 | printf("\tfree_head %u, num_added %u, num_free %u\n", |
444 | 0 | vq->free_head, vq->num_added, vq->num_free); |
445 | 0 | printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n", |
446 | 0 | vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow); |
447 | |
|
448 | 0 | printf("Shadow descriptor dump:\n"); |
449 | 0 | for (i = 0; i < vq->vring.num; i++) { |
450 | 0 | struct vring_desc_shadow *desc = &vq->vring_desc_shadow[i]; |
451 | |
|
452 | 0 | printf("\tdesc_shadow[%u] = { 0x%llx, len %u, flags %u, next %u }\n", |
453 | 0 | i, desc->addr, desc->len, desc->flags, desc->next); |
454 | 0 | } |
455 | |
|
456 | 0 | printf("Avail ring dump:\n"); |
457 | 0 | printf("\tflags %u, idx %u\n", |
458 | 0 | vq->vring.avail->flags, vq->vring.avail->idx); |
459 | 0 | for (i = 0; i < vq->vring.num; i++) { |
460 | 0 | printf("\tavail[%u] = %u\n", |
461 | 0 | i, vq->vring.avail->ring[i]); |
462 | 0 | } |
463 | |
|
464 | 0 | printf("Used ring dump:\n"); |
465 | 0 | printf("\tflags %u, idx %u\n", |
466 | 0 | vq->vring.used->flags, vq->vring.used->idx); |
467 | 0 | for (i = 0; i < vq->vring.num; i++) { |
468 | 0 | printf("\tused[%u] = { %u, %u }\n", i, |
469 | 0 | vq->vring.used->ring[i].id, vq->vring.used->ring[i].len); |
470 | 0 | } |
471 | 0 | } |