/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <stdint.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"
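
/*
 * AVX512 batch TX/RX helpers for the virtio packed ring.  Each call handles
 * one batch of PACKED_BATCH_SIZE descriptors (presumably four, i.e. the
 * number of 16-byte packed descriptors that fit in a single 512-bit
 * register); the batch size, the ID/FLAGS bit offsets and DEFAULT_REARM_DATA
 * are expected to be defined in virtio_rxtx_packed.h.
 */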
static inline int
virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
				   struct rte_mbuf **tx_pkts)
{
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t idx = vq->vq_avail_idx;
	struct virtio_net_hdr *hdr;
	struct vq_desc_extra *dxp;
	uint16_t i, cmp;

	if (vq->vq_avail_idx & PACKED_BATCH_MASK)
		return -1;

	if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
		return -1;
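
	/*
	 * The vector path only handles a batch-aligned window that does not
	 * wrap around the ring; anything else is left to the scalar TX path.
	 * The checks below gather one 64-bit rearm_data word per mbuf into a
	 * 256-bit register and validate it with masked 16-bit compares,
	 * assuming the usual rte_mbuf layout in which rearm_data starts at
	 * data_off and is followed by refcnt and nb_segs.
	 */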
	/* Load four mbufs rearm data */
	RTE_BUILD_BUG_ON(REFCNT_BITS_OFFSET >= 64);
	RTE_BUILD_BUG_ON(SEG_NUM_BITS_OFFSET >= 64);
	__m256i mbufs = _mm256_set_epi64x(*tx_pkts[3]->rearm_data,
					  *tx_pkts[2]->rearm_data,
					  *tx_pkts[1]->rearm_data,
					  *tx_pkts[0]->rearm_data);

	/* refcnt=1 and nb_segs=1 */
	__m256i mbuf_ref = _mm256_set1_epi64x(DEFAULT_REARM_DATA);
	__m256i head_rooms = _mm256_set1_epi16(head_size);

	/* Check refcnt and nb_segs */
	const __mmask16 mask = 0x6 | 0x6 << 4 | 0x6 << 8 | 0x6 << 12;
	cmp = _mm256_mask_cmpneq_epu16_mask(mask, mbufs, mbuf_ref);
	if (unlikely(cmp))
		return -1;
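
	/*
	 * The 0x6-per-nibble mask above keeps the refcnt and nb_segs lanes of
	 * each rearm_data word, so the compare against DEFAULT_REARM_DATA
	 * rejects any mbuf that is not a singly referenced, single-segment
	 * buffer.  The 0x1-per-nibble mask below keeps only the data_off
	 * lane, which the build assertion pins to the start of rearm_data.
	 */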
	/* Check headroom is enough */
	const __mmask16 data_mask = 0x1 | 0x1 << 4 | 0x1 << 8 | 0x1 << 12;
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
		offsetof(struct rte_mbuf, rearm_data));
	cmp = _mm256_mask_cmplt_epu16_mask(data_mask, mbufs, head_rooms);
	if (unlikely(cmp))
		return -1;
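
	/*
	 * A data_off smaller than the virtio-net header size would leave no
	 * room to prepend the header in place, so such bursts are also left
	 * to the scalar path.  From here on the batch is accepted: stash each
	 * mbuf as the descriptor cookie for later cleanup and steal head_size
	 * bytes of headroom for the virtio-net header.
	 */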
	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		dxp = &vq->vq_descx[idx + i];
		dxp->ndescs = 1;
		dxp->cookie = tx_pkts[i];
	}

	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		tx_pkts[i]->data_off -= head_size;
		tx_pkts[i]->data_len += head_size;
	}
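
	/*
	 * Build the four packed descriptors in one 512-bit register, one
	 * descriptor per 128-bit lane:
	 *   low  64 bits: addr  = buf_iova + data_off
	 *   high 64 bits: len   = data_len (header included),
	 *                 id    = idx + i, flags = cached avail/used flags
	 * descs_base carries buf_iova/data_len, data_offsets folds in
	 * data_off and the per-slot id (ID_BITS_OFFSET is assumed to place
	 * the id field inside the high 64-bit word).
	 */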
	__m512i descs_base = _mm512_set_epi64(tx_pkts[3]->data_len,
			tx_pkts[3]->buf_iova,
			tx_pkts[2]->data_len, tx_pkts[2]->buf_iova,
			tx_pkts[1]->data_len, tx_pkts[1]->buf_iova,
			tx_pkts[0]->data_len, tx_pkts[0]->buf_iova);

	/* id offset and data offset */
	__m512i data_offsets = _mm512_set_epi64((uint64_t)3 << ID_BITS_OFFSET,
			tx_pkts[3]->data_off,
			(uint64_t)2 << ID_BITS_OFFSET,
			tx_pkts[2]->data_off,
			(uint64_t)1 << ID_BITS_OFFSET,
			tx_pkts[1]->data_off,
			0, tx_pkts[0]->data_off);

	__m512i new_descs = _mm512_add_epi64(descs_base, data_offsets);
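
	/*
	 * flags_temp carries the base ring index (shifted into the id field)
	 * and the cached avail/used flags.  Broadcasting {0, flags_temp} to
	 * all four 128-bit lanes and adding it turns the per-lane ids 0..3
	 * into idx..idx+3 and stamps the same flags on every descriptor,
	 * while leaving the address qword untouched.
	 */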
	uint64_t flags_temp = (uint64_t)idx << ID_BITS_OFFSET |
		(uint64_t)vq->vq_packed.cached_flags << FLAGS_BITS_OFFSET;

	/* flags offset and guest virtual address offset */
	__m128i flag_offset = _mm_set_epi64x(flags_temp, 0);
	__m512i v_offset = _mm512_broadcast_i32x4(flag_offset);
	__m512i v_desc = _mm512_add_epi64(new_descs, v_offset);
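
	/*
	 * Fill the virtio-net header that now sits in front of each packet.
	 * Without TX offloads the device only needs an all-zero header, so
	 * the masked test below touches memory again only when stale
	 * non-zero header bytes are found; with offloads enabled the header
	 * is filled per packet by virtqueue_xmit_offload().
	 */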
	if (!vq->hw->has_tx_offload) {
		__m128i all_mask = _mm_set1_epi16(0xFFFF);
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
					struct virtio_net_hdr *, -head_size);
			__m128i v_hdr = _mm_loadu_si128((void *)hdr);
			if (unlikely(_mm_mask_test_epi16_mask(NET_HDR_MASK,
							v_hdr, all_mask))) {
				__m128i all_zero = _mm_setzero_si128();
				_mm_mask_storeu_epi16((void *)hdr,
						NET_HDR_MASK, all_zero);
			}
		}
	} else {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
					struct virtio_net_hdr *, -head_size);
			virtqueue_xmit_offload(hdr, tx_pkts[i]);
		}
	}
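
	/*
	 * Publish the whole batch with a single 64-byte store, then update
	 * stats, the avail index and the free count.  When the avail index
	 * wraps past the ring size, toggle the cached avail/used flag bits
	 * for the next lap, as the packed ring format requires.
	 */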
	/* Enqueue Packet buffers */
	_mm512_storeu_si512((void *)&vq->vq_packed.ring.desc[idx], v_desc);

	virtio_update_batch_stats(&txvq->stats, tx_pkts[0]->pkt_len,
			tx_pkts[1]->pkt_len, tx_pkts[2]->pkt_len,
			tx_pkts[3]->pkt_len);

	vq->vq_avail_idx += PACKED_BATCH_SIZE;
	vq->vq_free_cnt -= PACKED_BATCH_SIZE;

	if (vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^=
			VRING_PACKED_DESC_F_AVAIL_USED;
	}

	return 0;
}

static inline uint16_t
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
				   struct rte_mbuf **rx_pkts)
{
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint64_t addrs[PACKED_BATCH_SIZE];
	uint16_t id = vq->vq_used_cons_idx;
	uint8_t desc_stats;
	uint16_t i;
	void *desc_addr;

	if (id & PACKED_BATCH_MASK)
		return -1;

	if (unlikely((id + PACKED_BATCH_SIZE) > vq->vq_nentries))
		return -1;
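
	/*
	 * Check that the device has marked all four descriptors as used.
	 * The 0xaa lane mask keeps only the high 64-bit half of each
	 * descriptor (len/id/flags), PACKED_FLAGS_MASK isolates the
	 * avail/used bits, and the expected pattern is either those bits set
	 * or all-zero depending on the current used wrap counter.  32-bit
	 * builds construct the same constants with _mm512_set4_epi64().
	 */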
	/* only care avail/used bits */
#if defined(RTE_ARCH_I686)
	__m512i v_mask = _mm512_set4_epi64(PACKED_FLAGS_MASK, 0x0,
					   PACKED_FLAGS_MASK, 0x0);
#else
	__m512i v_mask = _mm512_maskz_set1_epi64(0xaa, PACKED_FLAGS_MASK);
#endif
	desc_addr = &vq->vq_packed.ring.desc[id];

	__m512i v_desc = _mm512_loadu_si512(desc_addr);
	__m512i v_flag = _mm512_and_epi64(v_desc, v_mask);

	__m512i v_used_flag = _mm512_setzero_si512();
	if (vq->vq_packed.used_wrap_counter)
#if defined(RTE_ARCH_I686)
		v_used_flag = _mm512_set4_epi64(PACKED_FLAGS_MASK, 0x0,
						PACKED_FLAGS_MASK, 0x0);
#else
		v_used_flag = _mm512_maskz_set1_epi64(0xaa, PACKED_FLAGS_MASK);
#endif

	/* Check all descs are used */
	desc_stats = _mm512_cmpneq_epu64_mask(v_flag, v_used_flag);
	if (desc_stats)
		return -1;
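
	/*
	 * All four descriptors are used: pick up the mbufs that were stashed
	 * as descriptor cookies at refill time, prefetch their payload, and
	 * record the address of each mbuf's rx_descriptor_fields1 for the
	 * scatter store below.
	 */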
	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		rx_pkts[i] = (struct rte_mbuf *)vq->vq_descx[id + i].cookie;
		rte_packet_prefetch(rte_pktmbuf_mtod(rx_pkts[i], void *));

		addrs[i] = (uintptr_t)rx_pkts[i]->rx_descriptor_fields1;
	}
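
	/*
	 * The masked shuffle below copies each descriptor's 32-bit len field
	 * into the dword lanes that the scatter store will land on pkt_len
	 * and data_len, and adding -hdr_size strips the virtio-net header
	 * from both counts.
	 */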
	/*
	 * load len from desc, store into mbuf pkt_len and data_len
	 * len limited by 16-bit buf_len, pkt_len[16:31] can be ignored
	 */
	const __mmask16 mask = 0x6 | 0x6 << 4 | 0x6 << 8 | 0x6 << 12;
	__m512i values = _mm512_maskz_shuffle_epi32(mask, v_desc, 0xAA);

	/* reduce hdr_len from pkt_len and data_len */
	__m512i mbuf_len_offset = _mm512_maskz_set1_epi32(mask,
			(uint32_t)-hdr_size);

	__m512i v_value = _mm512_add_epi32(values, mbuf_len_offset);
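
	/*
	 * The build assertion below pins data_len exactly 8 bytes after
	 * rx_descriptor_fields1, so both length fields of all four mbufs can
	 * be written by one 64-bit-element scatter that uses absolute
	 * addresses (base 0, byte scale 1).
	 */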
	/* assert offset of data_len */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
		offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);

	__m512i v_index = _mm512_set_epi64(addrs[3] + 8, addrs[3],
					   addrs[2] + 8, addrs[2],
					   addrs[1] + 8, addrs[1],
					   addrs[0] + 8, addrs[0]);
	/* batch store into mbufs */
	_mm512_i64scatter_epi64(0, v_index, v_value, 1);
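
	/*
	 * With RX offloads negotiated, parse the virtio-net header that the
	 * device wrote just in front of the packet data and translate it
	 * into the mbuf offload fields via virtio_vec_rx_offload().
	 */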
	if (hw->has_rx_offload) {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			char *addr = (char *)rx_pkts[i]->buf_addr +
				RTE_PKTMBUF_HEADROOM - hdr_size;
			virtio_vec_rx_offload(rx_pkts[i],
					(struct virtio_net_hdr *)addr);
		}
	}

	virtio_update_batch_stats(&rxvq->stats, rx_pkts[0]->pkt_len,
			rx_pkts[1]->pkt_len, rx_pkts[2]->pkt_len,
			rx_pkts[3]->pkt_len);

	vq->vq_free_cnt += PACKED_BATCH_SIZE;

	vq->vq_used_cons_idx += PACKED_BATCH_SIZE;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	return 0;
}