/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"
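/*
 * Note: struct vhost_dev, struct vhost_queue, MAX_PKT_BURST and the
 * VHOST_CONFIG/VHOST_DATA log types used below are assumed to come from
 * the example's main.h rather than from a DPDK library header.
 */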
/*
 * A very simple vhost-user net driver implementation, without
 * any extra features enabled, such as TSO and mrg-Rx.
 */
void
vs_vhost_net_setup(struct vhost_dev *dev)
{
	uint16_t i;
	int vid = dev->vid;
	int ret;
	struct vhost_queue *queue;

	RTE_LOG(INFO, VHOST_CONFIG,
		"setting builtin vhost-user net driver\n");

	rte_vhost_get_negotiated_features(vid, &dev->features);
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
		dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		dev->hdr_len = sizeof(struct virtio_net_hdr);

	ret = rte_vhost_get_mem_table(vid, &dev->mem);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "Failed to get "
			"VM memory layout for device(%d)\n", vid);
		return;
	}

	dev->nr_vrings = rte_vhost_get_vring_num(vid);
	for (i = 0; i < dev->nr_vrings; i++) {
		queue = &dev->queues[i];

		queue->last_used_idx  = 0;
		queue->last_avail_idx = 0;
		rte_vhost_get_vhost_vring(vid, i, &queue->vr);
	}
}
void
vs_vhost_net_remove(struct vhost_dev *dev)
{
	free(dev->mem);
}
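/*
 * Copy one host mbuf chain into the guest descriptor chain that starts at
 * desc_idx, prepending a zeroed virtio-net header. Returns 0 on success and
 * -1 if the chain is malformed or a guest address cannot be translated.
 */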
static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx)
{
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
	/* A counter to avoid desc dead loop chain */
	uint16_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	/*
	 * Checking of 'desc_addr' is placed outside of the 'unlikely' macro
	 * to avoid a performance issue with some versions of gcc (4.8.4 and
	 * 5.3.0) which otherwise store the offset on the stack instead of in
	 * a register.
	 */
	if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
		return -1;

	rte_prefetch0((void *)(uintptr_t)desc_addr);
	/* write virtio-net header */
	if (likely(desc_chunck_len >= dev->hdr_len)) {
		*(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
		desc_offset = dev->hdr_len;
	} else {
		uint64_t len;
		uint64_t remain = dev->hdr_len;
		uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
		uint64_t guest_addr = desc_gaddr;

		while (remain) {
			len = remain;
			dst = rte_vhost_va_from_guest_pa(dev->mem,
					guest_addr, &len);
			if (unlikely(!dst || !len))
				return -1;

			rte_memcpy((void *)(uintptr_t)dst,
					(void *)(uintptr_t)src,
					len);

			remain -= len;
			guest_addr += len;
			src += len;
		}
		desc_chunck_len = desc->len - dev->hdr_len;
		desc_gaddr += dev->hdr_len;
		desc_addr = rte_vhost_va_from_guest_pa(
				dev->mem, desc_gaddr,
				&desc_chunck_len);
		if (unlikely(!desc_addr))
			return -1;

		desc_offset = 0;
	}

	desc_avail  = desc->len - dev->hdr_len;
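	/*
	 * Copy loop below: rte_vhost_va_from_guest_pa() may shrink
	 * desc_chunck_len to the size of the host-contiguous chunk, so the
	 * copy advances chunk by chunk and re-translates whenever the
	 * current chunk is exhausted before the descriptor is.
	 */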
	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current mbuf, fetch next */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		/* done with current desc buf, fetch next */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				/* Not enough room in the vring buffer */
				return -1;
			}
			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;

			desc = &vr->desc[desc->next];
			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
			desc_avail  = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
			cpy_len);

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;
	}

	return 0;
}
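/*
 * Burst-enqueue count packets into the given virtqueue: reserve entries
 * from the avail ring, pre-fill the used ring, copy each packet with
 * enqueue_pkt(), then publish used->idx with release semantics and kick
 * the guest.
 */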
static __rte_always_inline uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	    struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint16_t avail_idx, free_entries, start_idx;
	uint16_t desc_indexes[MAX_PKT_BURST];
	uint16_t used_idx;
	uint32_t i;

	queue = &dev->queues[queue_id];
	vr    = &queue->vr;

	avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
	start_idx = queue->last_used_idx;
	free_entries = avail_idx - start_idx;
	count = RTE_MIN(count, free_entries);
	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
	if (count == 0)
		return 0;

	/* Retrieve all of the desc indexes first to avoid caching issues. */
	rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
	for (i = 0; i < count; i++) {
		used_idx = (start_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[used_idx];
		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = pkts[i]->pkt_len +
			dev->hdr_len;
	}

	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		uint16_t desc_idx = desc_indexes[i];
		int err;

		err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
		if (unlikely(err)) {
			used_idx = (start_idx + i) & (vr->size - 1);
			vr->used->ring[used_idx].len = dev->hdr_len;
		}

		if (i + 1 < count)
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
	}

	__atomic_add_fetch(&vr->used->idx, count, __ATOMIC_RELEASE);
	queue->last_used_idx += count;

	rte_vhost_vring_call(dev->vid, queue_id);

	return count;
}
uint16_t
builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	return vs_enqueue_pkts(dev, queue_id, pkts, count);
}
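/*
 * Copy one guest Tx descriptor chain into the mbuf 'm', allocating extra
 * mbufs from mbuf_pool when a single one cannot hold the packet. The first
 * descriptor only carries the virtio-net header and is skipped.
 */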
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx,
	    struct rte_mempool *mbuf_pool)
{
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	/* A counter to avoid desc dead loop chain */
	uint32_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	if (unlikely((desc->len < dev->hdr_len)) ||
			(desc->flags & VRING_DESC_F_INDIRECT))
		return -1;

	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;
	/*
	 * We don't support ANY_LAYOUT, nor VERSION_1, meaning
	 * a Tx packet from the guest must have at least 2 desc buffers:
	 * the first for storing the header and the others for
	 * storing the data.
	 *
	 * And since we don't support TSO, we can simply skip the
	 * header.
	 */
	desc = &vr->desc[desc->next];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;
	rte_prefetch0((void *)(uintptr_t)desc_addr);

	desc_offset = 0;
	desc_avail  = desc->len;
	nr_desc    += 1;

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
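	/*
	 * Main copy loop: walk the descriptor chain and copy it into the
	 * mbuf chain, allocating a new mbuf whenever the current one fills
	 * up, until the last descriptor (no VRING_DESC_F_NEXT) is drained.
	 */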
	while (1) {
		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
						   mbuf_offset),
			(void *)((uintptr_t)(desc_addr + desc_offset)),
			cpy_len);

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;

		/* This desc reaches its end, get the next one */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
				break;

			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;
			desc = &vr->desc[desc->next];

			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;
			rte_prefetch0((void *)(uintptr_t)desc_addr);

			desc_offset = 0;
			desc_avail  = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		/*
		 * This mbuf reaches its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				return -1;
			}

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	return 0;
}
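/*
 * Burst-dequeue up to count packets the guest has placed on the virtqueue:
 * pre-update the used ring, copy each descriptor chain into a freshly
 * allocated mbuf with dequeue_pkt(), then publish used->idx and kick the
 * guest.
 */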
static __rte_always_inline uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint32_t desc_indexes[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i = 0;
	uint16_t free_entries;
	uint16_t avail_idx;

	queue = &dev->queues[queue_id];
	vr    = &queue->vr;

	free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
			queue->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/* Prefetch available and used ring */
	avail_idx = queue->last_avail_idx & (vr->size - 1);
	used_idx  = queue->last_used_idx  & (vr->size - 1);
	rte_prefetch0(&vr->avail->ring[avail_idx]);
	rte_prefetch0(&vr->used->ring[used_idx]);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);

	if (unlikely(count == 0))
		return 0;

	/*
	 * Retrieve all of the head indexes first and pre-update used entries
	 * to avoid caching issues.
	 */
	for (i = 0; i < count; i++) {
		avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
		used_idx  = (queue->last_used_idx  + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[avail_idx];

		vr->used->ring[used_idx].id  = desc_indexes[i];
		vr->used->ring[used_idx].len = 0;
	}

	/* Prefetch descriptor index. */
	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		int err;

		if (likely(i + 1 < count))
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}
	}

	queue->last_avail_idx += i;
	queue->last_used_idx  += i;

	__atomic_add_fetch(&vr->used->idx, i, __ATOMIC_ACQ_REL);

	rte_vhost_vring_call(dev->vid, queue_id);

	return i;
}
uint16_t
builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count);
}
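
/*
 * Usage sketch (not part of the original file): one way the builtin driver
 * could be driven from a polling loop. VIRTIO_RXQ/VIRTIO_TXQ are assumed to
 * be the usual 0/1 queue ids from the example's main.h; the echo behaviour
 * itself is purely illustrative.
 */
static __rte_unused void
builtin_echo_burst(struct vhost_dev *dev, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t nr, i;

	/* Pull packets the guest transmitted on its TX virtqueue. */
	nr = builtin_dequeue_pkts(dev, VIRTIO_TXQ, mbuf_pool, pkts,
			MAX_PKT_BURST);
	if (nr == 0)
		return;

	/* Copy them straight back into the guest's RX virtqueue. */
	builtin_enqueue_pkts(dev, VIRTIO_RXQ, pkts, nr);

	/* Enqueue copies the data, so the host mbufs are still ours to free. */
	for (i = 0; i < nr; i++)
		rte_pktmbuf_free(pkts[i]);
}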