/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#ifndef _VIRTIO_RXTX_PACKED_H_
#define _VIRTIO_RXTX_PACKED_H_

#include <stdint.h>

#include <rte_net.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtqueue.h"

/* number of bits in a byte */
#define BYTE_SIZE 8

/* flag bits offset in packed ring desc higher 64bits */
#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)

#define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
	FLAGS_BITS_OFFSET)

/* reference count offset in mbuf rearm data */
#define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)

/* segment number offset in mbuf rearm data */
#define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)

/* default rearm data: refcnt = 1, nb_segs = 1 */
#define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
	1ULL << REFCNT_BITS_OFFSET)

/* id bits offset in packed ring desc higher 64bits */
#define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)

/* net hdr short size mask */
#define NET_HDR_MASK 0x3F

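/* A batch is one cache line worth of descriptors: with 16-byte packed
 * descriptors and a 64-byte cache line this is 4 descriptors.
 */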
#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
	sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)

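/* Per-compiler unroll pragma for the batch loops; fall back to a plain
 * loop when no unroll pragma is known for the compiler.
 */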
#ifdef VIRTIO_GCC_UNROLL_PRAGMA
#define virtio_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VIRTIO_CLANG_UNROLL_PRAGMA
#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VIRTIO_ICC_UNROLL_PRAGMA
#define virtio_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef virtio_for_each_try_unroll
#define virtio_for_each_try_unroll(iter, val, size) \
	for (iter = val; iter < size; iter++)
#endif

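/* Add the byte counts of a batch of four packets to the Rx/Tx stats. */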
static inline void
virtio_update_batch_stats(struct virtnet_stats *stats, uint16_t pkt_len1,
			  uint16_t pkt_len2, uint16_t pkt_len3,
			  uint16_t pkt_len4)
{
	stats->bytes += pkt_len1;
	stats->bytes += pkt_len2;
	stats->bytes += pkt_len3;
	stats->bytes += pkt_len4;
}

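/* Enqueue a single Tx mbuf, picking the cheapest descriptor layout the
 * device supports: push the net header into the mbuf headroom, use an
 * indirect descriptor, or fall back to the default layout.
 */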
static inline int
virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
				    struct rte_mbuf *txm)
{
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t slots, can_push = 0, use_indirect = 0;
	int16_t need;

	/* optimize ring usage */
	if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
	     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
	    rte_mbuf_refcnt_read(txm) == 1 && RTE_MBUF_DIRECT(txm) &&
	    txm->nb_segs == 1 && rte_pktmbuf_headroom(txm) >= hdr_size)
		can_push = 1;
	else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
		 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
		use_indirect = 1;

	/* How many main ring entries are needed for this Tx?
	 * indirect   => 1
	 * any_layout => number of segments
	 * default    => number of segments + 1
	 */
	slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
	need = slots - vq->vq_free_cnt;

	/* Positive value indicates it needs free vring descriptors */
	if (unlikely(need > 0)) {
		virtio_xmit_cleanup_inorder_packed(vq, need);
		need = slots - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
			return -1;
		}
	}

	/* Enqueue Packet buffers */
	virtqueue_enqueue_xmit_packed(txvq, txm, slots, use_indirect,
				      can_push, 1);

	txvq->stats.bytes += txm->pkt_len;

	return 0;
}

/* Optionally fill offload information in structure */
static inline int
virtio_vec_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0)
		return 0;

	/* GSO is not supported in the vec path, skip the check */
	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

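	/* VIRTIO_NET_HDR_F_NEEDS_CSUM: the host handed us a packet with a
	 * partial checksum.  If it looks like a supported L4 protocol, just
	 * flag the checksum as not computed; otherwise complete it in
	 * software below.
	 */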
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum) < 0)
				return -1;
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	return 0;
}

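/* Dequeue one used descriptor and hand its mbuf to the caller.  Returns 0
 * on success, non-zero when the next descriptor is not used yet or carries
 * no mbuf cookie.
 */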
static inline uint16_t
virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
				    struct rte_mbuf **rx_pkts)
{
	uint16_t used_idx, id;
	uint32_t len;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	struct vring_packed_desc *desc;
	struct rte_mbuf *cookie;

	desc = vq->vq_packed.ring.desc;
	used_idx = vq->vq_used_cons_idx;
	if (!desc_is_used(&desc[used_idx], vq))
		return -1;

	len = desc[used_idx].len;
	id = desc[used_idx].id;
	cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
	if (unlikely(cookie == NULL)) {
		PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
		return -1;
	}
	rte_prefetch0(cookie);
	rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));

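	/* The descriptor length includes the virtio net header that sits in
	 * front of the packet data; strip it from the mbuf lengths.
	 */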
	cookie->data_off = RTE_PKTMBUF_HEADROOM;
	cookie->ol_flags = 0;
	cookie->pkt_len = (uint32_t)(len - hdr_size);
	cookie->data_len = (uint32_t)(len - hdr_size);

	hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
					RTE_PKTMBUF_HEADROOM - hdr_size);
	if (hw->has_rx_offload)
		virtio_vec_rx_offload(cookie, hdr);

	*rx_pkts = cookie;

	rxvq->stats.bytes += cookie->pkt_len;

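	/* Consume the descriptor; on ring wrap-around flip the used wrap
	 * counter so desc_is_used() keeps working for the next lap.
	 */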
	vq->vq_free_cnt++;
	vq->vq_used_cons_idx++;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	return 0;
}

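/* Refill the Rx ring with num mbufs.  Descriptors are populated in
 * cache-line-sized batches; every descriptor except the head is made
 * available immediately, the head is flipped last so the device never
 * sees a half-initialised chain.
 */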
static inline void
virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
			      struct rte_mbuf **cookie,
			      uint16_t num)
{
	struct virtqueue *vq = rxvq->vq;
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	uint16_t flags = vq->vq_packed.cached_flags;
	struct virtio_hw *hw = vq->hw;
	struct vq_desc_extra *dxp;
	uint16_t idx, i;
	uint16_t batch_num, total_num = 0;
	uint16_t head_idx = vq->vq_avail_idx;
	uint16_t head_flag = vq->vq_packed.cached_flags;
	uint64_t addr;

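	/* Fill in cache-line-sized batches, clamping each batch at the end
	 * of the ring and at the number of mbufs still to be posted.
	 */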
	do {
		idx = vq->vq_avail_idx;

		batch_num = PACKED_BATCH_SIZE;
		if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
			batch_num = vq->vq_nentries - idx;
		if (unlikely((total_num + batch_num) > num))
			batch_num = num - total_num;

		virtio_for_each_try_unroll(i, 0, batch_num) {
			dxp = &vq->vq_descx[idx + i];
			dxp->cookie = (void *)cookie[total_num + i];

			addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
			start_dp[idx + i].addr = addr;
			start_dp[idx + i].len = cookie[total_num + i]->buf_len
				- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;

			if (total_num || i) {
				virtqueue_store_flags_packed(&start_dp[idx + i],
						flags, hw->weak_barriers);
			}
		}

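		/* Advance the producer index; when it wraps, toggle the
		 * cached avail/used flag bits used for the following
		 * descriptors.
		 */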
		vq->vq_avail_idx += batch_num;
		if (vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
			flags = vq->vq_packed.cached_flags;
		}

		total_num += batch_num;
	} while (total_num < num);

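	/* Expose the head descriptor only after all the others so the device
	 * cannot start processing a partially filled chain.
	 */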
	virtqueue_store_flags_packed(&start_dp[head_idx], head_flag,
				hw->weak_barriers);
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
}

#endif /* _VIRTIO_RXTX_PACKED_H_ */