/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Arm Corporation
 */

#include <stdint.h>
#include <stddef.h>

#include <rte_net.h>
#include <rte_vect.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"
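
/*
 * Batch (four packets per call) Tx enqueue and Rx dequeue helpers for the
 * packed virtqueue, vectorized with NEON. Both helpers return -1 when the
 * batch cannot be handled, so the caller is expected to fall back to the
 * scalar single-packet path.
 */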
static inline int
virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
				   struct rte_mbuf **tx_pkts)
{
	struct virtqueue *vq = txvq->vq;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t idx = vq->vq_avail_idx;
	struct virtio_net_hdr *hdr;
	struct vq_desc_extra *dxp;
	struct vring_packed_desc *p_desc;
	uint16_t i;
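
	/*
	 * Only a full batch of four packets, starting at a batch-aligned
	 * descriptor index that does not wrap the ring, is handled here.
	 */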
	if (idx & PACKED_BATCH_MASK)
		return -1;

	if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
		return -1;

	/* Map four refcnt and nb_segs from mbufs to one NEON register. */
	uint8x16_t ref_seg_msk = {
		2, 3, 4, 5, 10, 11, 12, 13,
		18, 19, 20, 21, 26, 27, 28, 29
	};

	/* Map four data_off from mbufs to one NEON register. */
	uint8x8_t data_msk = {
		0, 1, 8, 9, 16, 17, 24, 25
	};

	/* Zero the virtio net header (assumed to be the 12-byte modern header)
	 * while keeping the trailing packet bytes of the 16-byte store below.
	 */
	uint16x8_t net_hdr_msk = {
		0, 0, 0, 0, 0, 0, 0xFFFF, 0xFFFF
	};

	uint16x4_t pkts[PACKED_BATCH_SIZE];
	uint8x16x2_t mbuf;
	/* Load four mbufs rearm data. */
	RTE_BUILD_BUG_ON(REFCNT_BITS_OFFSET >= 64);
	pkts[0] = vld1_u16((uint16_t *)&tx_pkts[0]->rearm_data);
	pkts[1] = vld1_u16((uint16_t *)&tx_pkts[1]->rearm_data);
	pkts[2] = vld1_u16((uint16_t *)&tx_pkts[2]->rearm_data);
	pkts[3] = vld1_u16((uint16_t *)&tx_pkts[3]->rearm_data);

	mbuf.val[0] = vreinterpretq_u8_u16(vcombine_u16(pkts[0], pkts[1]));
	mbuf.val[1] = vreinterpretq_u8_u16(vcombine_u16(pkts[2], pkts[3]));

	/* refcnt = 1 and nb_segs = 1 */
	uint32x4_t def_ref_seg = vdupq_n_u32(0x10001);
	/* Check refcnt and nb_segs. */
	uint32x4_t ref_seg = vreinterpretq_u32_u8(vqtbl2q_u8(mbuf, ref_seg_msk));
	poly128_t cmp1 = vreinterpretq_p128_u32(~vceqq_u32(ref_seg, def_ref_seg));
	if (unlikely(cmp1))
		return -1;

	/* Check headroom is enough. */
	uint16x4_t head_rooms = vdup_n_u16(head_size);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, rearm_data));
	uint16x4_t data_offset = vreinterpret_u16_u8(vqtbl2_u8(mbuf, data_msk));
	uint64x1_t cmp2 = vreinterpret_u64_u16(vclt_u16(data_offset, head_rooms));
	if (unlikely(vget_lane_u64(cmp2, 0)))
		return -1;
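
	/* One descriptor per packet: record the mbufs so they can be freed
	 * once the descriptors are marked used.
	 */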
	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		dxp = &vq->vq_descx[idx + i];
		dxp->ndescs = 1;
		dxp->cookie = tx_pkts[i];
	}
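
	/* Claim head_size bytes of headroom for the virtio net header. */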
	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		tx_pkts[i]->data_off -= head_size;
		tx_pkts[i]->data_len += head_size;
	}
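
	/*
	 * Build the four descriptors in two register pairs: val[0] carries the
	 * buffer addresses, val[1] carries the packed len | id | flags word,
	 * so vst2q_u64() below writes them out interleaved, two descriptors
	 * per store.
	 */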
	uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
	uint64x2_t base_addr0 = {
		VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
		VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
	};
	uint64x2_t base_addr1 = {
		VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
		VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
	};

	desc[0].val[0] = base_addr0;
	desc[1].val[0] = base_addr1;

	uint64_t flags = (uint64_t)vq->vq_packed.cached_flags << FLAGS_LEN_BITS_OFFSET;
	uint64x2_t tx_desc0 = {
		flags | (uint64_t)idx << ID_BITS_OFFSET | tx_pkts[0]->data_len,
		flags | (uint64_t)(idx + 1) << ID_BITS_OFFSET | tx_pkts[1]->data_len
	};

	uint64x2_t tx_desc1 = {
		flags | (uint64_t)(idx + 2) << ID_BITS_OFFSET | tx_pkts[2]->data_len,
		flags | (uint64_t)(idx + 3) << ID_BITS_OFFSET | tx_pkts[3]->data_len
	};

	desc[0].val[1] = tx_desc0;
	desc[1].val[1] = tx_desc1;
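
	/*
	 * Without Tx offloads the virtio net header only has to be zeroed;
	 * otherwise it is filled in from the mbuf offload metadata.
	 */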
	if (!vq->hw->has_tx_offload) {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
					struct virtio_net_hdr *, -head_size);
			/* Clear net hdr. */
			uint16x8_t v_hdr = vld1q_u16((void *)hdr);
			vst1q_u16((void *)hdr, vandq_u16(v_hdr, net_hdr_msk));
		}
	} else {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
					struct virtio_net_hdr *, -head_size);
			virtqueue_xmit_offload(hdr, tx_pkts[i], true);
		}
	}

	/* Enqueue packet buffers. */
	p_desc = &vq->vq_packed.ring.desc[idx];
	vst2q_u64((uint64_t *)p_desc, desc[0]);
	vst2q_u64((uint64_t *)(p_desc + 2), desc[1]);

	virtio_update_batch_stats(&txvq->stats, tx_pkts[0]->pkt_len,
			tx_pkts[1]->pkt_len, tx_pkts[2]->pkt_len,
			tx_pkts[3]->pkt_len);

	vq->vq_avail_idx += PACKED_BATCH_SIZE;
	vq->vq_free_cnt -= PACKED_BATCH_SIZE;
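
	/* Wrap the avail index and flip the AVAIL/USED flag bits when the
	 * batch crosses the end of the ring.
	 */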
	if (vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^=
			VRING_PACKED_DESC_F_AVAIL_USED;
	}

	return 0;
}
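
/*
 * Usage sketch (an assumed caller shape, not this driver's exact burst
 * routine): a burst Tx function tries the batch path first and falls back
 * to the single-packet enqueue helper whenever the batch path returns -1:
 *
 *	uint16_t nb_tx = 0;
 *	while (nb_tx < nb_pkts) {
 *		if (nb_pkts - nb_tx >= PACKED_BATCH_SIZE &&
 *		    !virtqueue_enqueue_batch_packed_vec(txvq, &tx_pkts[nb_tx])) {
 *			nb_tx += PACKED_BATCH_SIZE;
 *			continue;
 *		}
 *		if (virtqueue_enqueue_single_packed_vec(txvq, tx_pkts[nb_tx]))
 *			break;
 *		nb_tx++;
 *	}
 */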

static inline int
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
				   struct rte_mbuf **rx_pkts)
{
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t head_size = hw->vtnet_hdr_size;
	uint16_t id = vq->vq_used_cons_idx;
	struct vring_packed_desc *p_desc;
	uint16_t i;

	if (id & PACKED_BATCH_MASK)
		return -1;

	if (unlikely((id + PACKED_BATCH_SIZE) > vq->vq_nentries))
		return -1;

	/* Map packed descriptor to mbuf fields. */
	uint8x16_t shuf_msk1 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
		0, 1,			/* octet 1~0, low 16 bits pkt_len */
		0xFF, 0xFF,		/* skip high 16 bits of pkt_len, zero out */
		0, 1,			/* octet 1~0, 16 bits data_len */
		0xFF, 0xFF,		/* vlan tci set as unknown */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	uint8x16_t shuf_msk2 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
		8, 9,			/* octet 9~8, low 16 bits pkt_len */
		0xFF, 0xFF,		/* skip high 16 bits of pkt_len, zero out */
		8, 9,			/* octet 9~8, 16 bits data_len */
		0xFF, 0xFF,		/* vlan tci set as unknown */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	/* Subtract the header length. */
	uint16x8_t len_adjust = {
		0, 0,		/* ignore pkt_type field */
		head_size,	/* sub head_size on pkt_len */
		0,		/* ignore high 16 bits of pkt_len */
		head_size,	/* sub head_size on data_len */
		0, 0, 0		/* ignore non-length fields */
	};
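
	/* The descriptor length counts the virtio net header, which is not
	 * part of the mbuf payload; len_adjust removes it from pkt_len and
	 * data_len in the vsubq_u16() calls below.
	 */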
	uint64x2_t desc[PACKED_BATCH_SIZE / 2];
	uint64x2x2_t mbp[PACKED_BATCH_SIZE / 2];
	uint64x2_t pkt_mb[PACKED_BATCH_SIZE];

	p_desc = &vq->vq_packed.ring.desc[id];
	/* Load high 64 bits of packed descriptor 0,1. */
	desc[0] = vld2q_u64((uint64_t *)(p_desc)).val[1];
	/* Load high 64 bits of packed descriptor 2,3. */
	desc[1] = vld2q_u64((uint64_t *)(p_desc + 2)).val[1];

	/* Only care avail/used bits. */
	uint32x4_t v_mask = vdupq_n_u32(PACKED_FLAGS_MASK);
	/* Extract high 32 bits of packed descriptor (id, flags). */
	uint32x4_t v_desc = vuzp2q_u32(vreinterpretq_u32_u64(desc[0]),
				vreinterpretq_u32_u64(desc[1]));
	uint32x4_t v_flag = vandq_u32(v_desc, v_mask);

	uint32x4_t v_used_flag = vdupq_n_u32(0);
	if (vq->vq_packed.used_wrap_counter)
		v_used_flag = vdupq_n_u32(PACKED_FLAGS_MASK);
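
	/* A descriptor is used once both its AVAIL and USED flag bits match
	 * the ring's current wrap counter, i.e. its flags equal v_used_flag.
	 */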
	poly128_t desc_stats = vreinterpretq_p128_u32(~vceqq_u32(v_flag, v_used_flag));

	/* Check all descs are used. */
	if (desc_stats)
		return -1;

	/* Load 2 mbuf pointers per time. */
	mbp[0] = vld2q_u64((uint64_t *)&vq->vq_descx[id]);
	vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0].val[0]);

	mbp[1] = vld2q_u64((uint64_t *)&vq->vq_descx[id + 2]);
	vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1].val[0]);

	/**
	 * Update data length and packet length for descriptor.
	 * structure of pkt_mb:
	 * --------------------------------------------------------------------
	 * |32 bits pkt_type|32 bits pkt_len|16 bits data_len|16 bits vlan_tci|
	 * --------------------------------------------------------------------
	 */
	pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[0]), shuf_msk1));
	pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[0]), shuf_msk2));
	pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[1]), shuf_msk1));
	pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[1]), shuf_msk2));

	pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
	pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
	pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
	pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));

	vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1, pkt_mb[0]);
	vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1, pkt_mb[1]);
	vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1, pkt_mb[2]);
	vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1, pkt_mb[3]);
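
	/* The virtio net header sits in the headroom just in front of the
	 * packet data; parse it for Rx offload flags when negotiated.
	 */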
	if (hw->has_rx_offload) {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			char *addr = (char *)rx_pkts[i]->buf_addr +
					RTE_PKTMBUF_HEADROOM - head_size;
			virtio_vec_rx_offload(rx_pkts[i],
					(struct virtio_net_hdr *)addr);
		}
	}

	virtio_update_batch_stats(&rxvq->stats, rx_pkts[0]->pkt_len,
			rx_pkts[1]->pkt_len, rx_pkts[2]->pkt_len,
			rx_pkts[3]->pkt_len);

	vq->vq_free_cnt += PACKED_BATCH_SIZE;

	vq->vq_used_cons_idx += PACKED_BATCH_SIZE;
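	/* Wrap the used consumer index and toggle the used wrap counter when
	 * the batch crosses the end of the ring.
	 */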
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	return 0;
}