/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_mempool.h>

#include "virtio_pci.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"

/*
 * Per virtio_ring.h in Linux.
 *     For virtio_pci on SMP, we don't need to order with respect to MMIO
 *     accesses through relaxed memory I/O windows, so smp_mb() et al are
 *     sufficient.
 *
 *     For using virtio to talk to real devices (e.g. vDPA) we do need real
 *     barriers.
 */
static inline void
virtio_mb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_smp_mb();
	else
		rte_mb();
}

static inline void
virtio_rmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_smp_rmb();
	else
		rte_cio_rmb();
}

static inline void
virtio_wmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_smp_wmb();
	else
		rte_cio_wmb();
}

static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
			     uint16_t flags, uint8_t weak_barriers)
{
	if (weak_barriers) {
/* x86 prefers using rte_smp_wmb over __atomic_store_n as it reports slightly
 * better performance (~1.5%), which comes from the branch saved by the
 * compiler. The if and else branches are identical with the smp and cio
 * barriers both defined as compiler barriers on x86.
 */
#ifdef RTE_ARCH_X86_64
		rte_smp_wmb();
		dp->flags = flags;
#else
		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
#endif
	} else {
		rte_cio_wmb();
		dp->flags = flags;
	}
}

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif

#define VIRTQUEUE_MAX_NAME_SZ 32

#ifdef RTE_VIRTIO_USER
/**
 * Return the physical address (or virtual address in case of
 * virtio-user) of mbuf data buffer.
 *
 * The address is first cast to the word size (sizeof(uintptr_t))
 * before casting it to uint64_t. This makes it work with different
 * combinations of word size (64-bit and 32-bit) and virtio device
 * (virtio-pci and virtio-user).
 */
#define VIRTIO_MBUF_ADDR(mb, vq) \
	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
#endif

/**
 * Return the physical address (or virtual address in case of
 * virtio-user) of mbuf data buffer, taking the mbuf data offset
 * into account.
 */
#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
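
/*
 * Example (illustrative sketch only, not part of the original header): a
 * transmit descriptor for an mbuf "cookie" is typically filled with the
 * data DMA address and length, roughly as:
 *
 *	struct vring_desc *dp = &vq->vq_split.ring.desc[idx];
 *
 *	dp->addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 *	dp->len   = cookie->data_len;
 *	dp->flags = 0;
 *
 * "idx" and "cookie" are hypothetical locals; receive descriptors
 * additionally set VRING_DESC_F_WRITE because the device writes into
 * the buffer.
 */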

#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2

enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };

/**
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

/**
 * Control the RX mode, i.e. promiscuous, allmulti, etc.
 * All commands require an "out" sg entry containing a 1 byte
 * state value, zero = disable, non-zero = enable. Commands
 * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
 * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
 */
#define VIRTIO_NET_CTRL_RX              0
#define VIRTIO_NET_CTRL_RX_PROMISC      0
#define VIRTIO_NET_CTRL_RX_ALLMULTI     1
#define VIRTIO_NET_CTRL_RX_ALLUNI       2
#define VIRTIO_NET_CTRL_RX_NOMULTI      3
#define VIRTIO_NET_CTRL_RX_NOUNI        4
#define VIRTIO_NET_CTRL_RX_NOBCAST      5
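
/*
 * Example (illustrative sketch only, not part of the original header):
 * enabling promiscuous mode through the control virtqueue sends class
 * VIRTIO_NET_CTRL_RX, command VIRTIO_NET_CTRL_RX_PROMISC and a single
 * 1-byte state payload (non-zero = enable). Using the struct
 * virtio_pmd_ctrl defined later in this header, and assuming the PMD's
 * command helper (virtio_send_command() in virtio_ethdev.c), this looks
 * roughly like:
 *
 *	struct virtio_pmd_ctrl ctrl;
 *	int dlen[1];
 *
 *	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
 *	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
 *	ctrl.data[0] = 1;
 *	dlen[0] = 1;
 *	virtio_send_command(hw->cvq, &ctrl, dlen, 1);
 */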

/**
 * Control the MAC
 *
 * The MAC filter table is managed by the hypervisor; the guest should
 * assume the size is infinite. Filtering should be considered
 * non-perfect, i.e. based on hypervisor resources, the guest may
 * receive packets from sources not specified in the filter list.
 *
 * In addition to the class/cmd header, the TABLE_SET command requires
 * two out scatterlists. Each contains a 4 byte count of entries followed
 * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
 * first sg list contains unicast addresses, the second is for multicast.
 * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
 * is available.
 *
 * The ADDR_SET command requires one out scatterlist containing a
 * 6-byte MAC address. This functionality is present if the
 * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
 */
struct virtio_net_ctrl_mac {
	uint32_t entries;
	uint8_t macs[][RTE_ETHER_ADDR_LEN];
} __attribute__((__packed__));

#define VIRTIO_NET_CTRL_MAC    1
#define VIRTIO_NET_CTRL_MAC_TABLE_SET        0
#define VIRTIO_NET_CTRL_MAC_ADDR_SET         1
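
/*
 * Example (illustrative sketch only, not part of the original header): a
 * TABLE_SET payload carrying one unicast address and no multicast
 * addresses consists of two virtio_net_ctrl_mac blocks packed back to
 * back in the command data buffer:
 *
 *	struct virtio_net_ctrl_mac *uc, *mc;
 *
 *	uc = (struct virtio_net_ctrl_mac *)ctrl.data;
 *	uc->entries = 1;
 *	memcpy(uc->macs[0], mac_addr, RTE_ETHER_ADDR_LEN);
 *	mc = (struct virtio_net_ctrl_mac *)
 *		(ctrl.data + sizeof(uc->entries) + RTE_ETHER_ADDR_LEN);
 *	mc->entries = 0;
 *
 * "ctrl" and "mac_addr" are hypothetical locals; the unicast and
 * multicast blocks are then handed to the device as the two out sg
 * entries described above.
 */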

/**
 * Control VLAN filtering
 *
 * The VLAN filter table is controlled via a simple ADD/DEL interface.
 * VLAN IDs not added may be filtered by the hypervisor. Del is the
 * opposite of add. Both commands expect an out entry containing a 2
 * byte VLAN ID. VLAN filtering is available with the
 * VIRTIO_NET_F_CTRL_VLAN feature bit.
 */
#define VIRTIO_NET_CTRL_VLAN     2
#define VIRTIO_NET_CTRL_VLAN_ADD 0
#define VIRTIO_NET_CTRL_VLAN_DEL 1

/**
 * Control link announce acknowledgement
 *
 * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that the
 * driver has received the notification; the device clears the
 * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
 * this command.
 */
#define VIRTIO_NET_CTRL_ANNOUNCE     3
#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0

struct virtio_net_ctrl_hdr {
	uint8_t class;
	uint8_t cmd;
} __attribute__((packed));

typedef uint8_t virtio_net_ctrl_ack;

#define VIRTIO_NET_OK  0
#define VIRTIO_NET_ERR 1

#define VIRTIO_MAX_CTRL_DATA 2048

struct virtio_pmd_ctrl {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	uint8_t data[VIRTIO_MAX_CTRL_DATA];
};

struct vq_desc_extra {
	void *cookie;
	uint16_t ndescs;
	uint16_t next;
};

struct virtqueue {
	struct virtio_hw *hw; /**< virtio_hw structure pointer. */
	union {
		struct {
			/**< vring keeping desc, used and avail */
			struct vring ring;
		} vq_split;

		struct {
			/**< vring keeping descs and events */
			struct vring_packed ring;
			bool used_wrap_counter;
			uint16_t cached_flags; /**< cached flags for descs */
			uint16_t event_flags_shadow;
		} vq_packed;
	};

	uint16_t vq_used_cons_idx; /**< last consumed descriptor */
	uint16_t vq_nentries;  /**< vring desc numbers */
	uint16_t vq_free_cnt;  /**< num of desc available */
	uint16_t vq_avail_idx; /**< sync until needed */
	uint16_t vq_free_thresh; /**< free threshold */

	void *vq_ring_virt_mem;  /**< linear address of vring */
	unsigned int vq_ring_size;

	union {
		struct virtnet_rx rxq;
		struct virtnet_tx txq;
		struct virtnet_ctl cq;
	};

	rte_iova_t vq_ring_mem; /**< physical address of vring,
				 * or virtual address for virtio_user. */

	/**
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t vq_desc_head_idx;
	uint16_t vq_desc_tail_idx;
	uint16_t vq_queue_index;   /**< PCI queue index */
	uint16_t offset; /**< relative offset to obtain addr in mbuf */
	uint16_t *notify_addr;
	struct rte_mbuf **sw_ring;  /**< RX software ring. */
	struct vq_desc_extra vq_descx[0];
};
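
/*
 * Note (illustrative, not part of the original header): vq_descx[] is a
 * zero-length trailing array, so the per-descriptor extra state is
 * allocated together with the virtqueue itself, roughly as:
 *
 *	size_t sz = sizeof(struct virtqueue) +
 *		    vq_size * sizeof(struct vq_desc_extra);
 *	struct virtqueue *vq = rte_zmalloc_socket(vq_name, sz,
 *				RTE_CACHE_LINE_SIZE, socket_id);
 *
 * "vq_size", "vq_name" and "socket_id" are hypothetical locals; the real
 * allocation happens in the PMD's queue setup code.
 */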

/* If multiqueue is provided by host, then we support it. */
#define VIRTIO_NET_CTRL_MQ   4

#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET        0
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000

/**
 * This is the first element of the scatter-gather list. If you don't
 * specify GSO or CSUM features, you can simply ignore the header.
 */
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1    /**< Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID 2    /**< Checksum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE     0    /**< Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4    1    /**< GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP      3    /**< GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6    4    /**< GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN      0x80 /**< TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;     /**< Ethernet + IP + tcp/udp hdrs */
	uint16_t gso_size;    /**< Bytes to append to hdr_len per frame */
	uint16_t csum_start;  /**< Position to start checksumming from */
	uint16_t csum_offset; /**< Offset after that to place checksum */
};
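
/*
 * Example (illustrative sketch only, not part of the original header):
 * for an IPv4 TCP segmentation (TSO) request, the transmit path fills
 * the header roughly as below, assuming "m" is the rte_mbuf being sent
 * and "hdr" points at its virtio_net_hdr:
 *
 *	hdr->csum_start = m->l2_len + m->l3_len;
 *	hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
 *	hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 *	hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 *	hdr->gso_size = m->tso_segsz;
 *	hdr->hdr_len = m->l2_len + m->l3_len + m->l4_len;
 */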

/**
 * This is the version of the header to use when the MRG_RXBUF
 * feature has been negotiated.
 */
struct virtio_net_hdr_mrg_rxbuf {
	struct virtio_net_hdr hdr;
	uint16_t num_buffers; /**< Number of merged rx buffers */
};

/* Region reserved to allow for transmit header and indirect ring */
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
		__attribute__((__aligned__(16)));
};

static inline int
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
	uint16_t used, avail, flags;

	flags = desc->flags;
	used = !!(flags & VRING_PACKED_DESC_F_USED);
	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

	return avail == used && used == vq->vq_packed.used_wrap_counter;
}

static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		vq->vq_packed.ring.desc[i].id = i;
		vq->vq_descx[i].next = i + 1;
	}
	vq->vq_packed.ring.desc[i].id = i;
	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}

/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n - 1; i++)
		dp[i].next = (uint16_t)(i + 1);
	dp[i].next = VQ_RING_DESC_CHAIN_END;
}

/**
 * Tell the backend not to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
	if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
		vq->vq_packed.ring.driver->desc_event_flags =
			vq->vq_packed.event_flags_shadow;
	}
}

/**
 * Tell the backend not to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_disable_intr_split(struct virtqueue *vq)
{
	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

/**
 * Tell the backend not to interrupt us.
 */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
	if (vtpci_packed_queue(vq->hw))
		virtqueue_disable_intr_packed(vq);
	else
		virtqueue_disable_intr_split(vq);
}

/**
 * Tell the backend to interrupt us. Implementation for packed virtqueues.
 */
static inline void
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
	if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
		vq->vq_packed.ring.driver->desc_event_flags =
			vq->vq_packed.event_flags_shadow;
	}
}

/**
 * Tell the backend to interrupt us. Implementation for split virtqueues.
 */
static inline void
virtqueue_enable_intr_split(struct virtqueue *vq)
{
	vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}

/**
 * Tell the backend to interrupt us.
 */
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
	if (vtpci_packed_queue(vq->hw))
		virtqueue_enable_intr_packed(vq);
	else
		virtqueue_enable_intr_split(vq);
}

/**
 * Dump virtqueue internal structures, for debug purpose only.
 */
void virtqueue_dump(struct virtqueue *vq);

/**
 * Get all mbufs to be freed.
 */
struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);

/* Flush the elements in the used ring. */
void virtqueue_rxvq_flush(struct virtqueue *vq);

static inline int
virtqueue_full(const struct virtqueue *vq)
{
	return vq->vq_free_cnt == 0;
}

static inline int
virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
{
	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
		return VTNET_CQ;
	else if (vtpci_queue_idx % 2 == 0)
		return VTNET_RQ;
	else
		return VTNET_TQ;
}

#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \
					(vq)->vq_used_cons_idx))
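
/*
 * Note (illustrative, not part of the original header): the subtraction
 * above is done in uint16_t arithmetic, so the result stays correct when
 * used->idx wraps around. For example:
 *
 *	used->idx = 3, vq_used_cons_idx = 65533
 *	(uint16_t)(3 - 65533) = 6 entries pending
 */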

void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
			  uint16_t num);

static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
	virtio_wmb(vq->hw->weak_barriers);
	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
}

static inline void
vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;
	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
	if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
		vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
	vq->vq_avail_idx++;
}

static inline int
virtqueue_kick_prepare(struct virtqueue *vq)
{
	/*
	 * Ensure updated avail->idx is visible to vhost before reading
	 * used->flags.
	 */
	virtio_mb(vq->hw->weak_barriers);
	return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static inline int
virtqueue_kick_prepare_packed(struct virtqueue *vq)
{
	uint16_t flags;

	/*
	 * Ensure updated data is visible to vhost before reading the flags.
	 */
	virtio_mb(vq->hw->weak_barriers);
	flags = vq->vq_packed.ring.device->desc_event_flags;

	return flags != RING_EVENT_FLAGS_DISABLE;
}

/*
 * virtqueue_kick_prepare*() or virtio_wmb() should be called
 * before this function to be sure that all the data is visible to vhost.
 */
static inline void
virtqueue_notify(struct virtqueue *vq)
{
	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
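
/*
 * Example (illustrative sketch only, not part of the original header): a
 * split-ring transmit path typically ends an enqueue burst with the
 * sequence below, so the store of avail->idx is ordered before the check
 * of VRING_USED_F_NO_NOTIFY:
 *
 *	vq_update_avail_ring(vq, head_idx);
 *	vq_update_avail_idx(vq);
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * "head_idx" is a hypothetical local holding the head of the descriptor
 * chain just filled; packed rings use virtqueue_kick_prepare_packed()
 * instead.
 */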

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_split.ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	if (vtpci_packed_queue((vq)->hw)) { \
		PMD_INIT_LOG(DEBUG, \
		"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
		" cached_flags=0x%x; used_wrap_counter=%d", \
		(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
		(vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
		(vq)->vq_packed.used_wrap_counter); \
		break; \
	} \
	PMD_INIT_LOG(DEBUG, \
	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
	  " avail.flags=0x%x; used.flags=0x%x", \
	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
	  (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \
	  (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \
	  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif

#endif /* _VIRTQUEUE_H_ */