8533e1e82531192067a7f44a5e71cc14bcb436f4
[dpdk.git] / drivers / net / virtio / virtio_rxtx_packed_neon.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Arm Corporation
3  */
4
5 #include <stdlib.h>
6 #include <stdint.h>
7 #include <stdio.h>
8 #include <string.h>
9 #include <errno.h>
10
11 #include <rte_net.h>
12 #include <rte_vect.h>
13
14 #include "virtio_ethdev.h"
15 #include "virtio_pci.h"
16 #include "virtio_rxtx_packed.h"
17 #include "virtqueue.h"
18
/**
 * Dequeue one batch of PACKED_BATCH_SIZE (4) received packets from a packed
 * virtqueue using NEON vector instructions.
 *
 * All four descriptors in the batch must already be marked "used" by the
 * device; otherwise no packets are dequeued.
 *
 * @param rxvq
 *   Rx virtqueue to dequeue from.
 * @param rx_pkts
 *   Output array; on success, filled with PACKED_BATCH_SIZE mbuf pointers
 *   whose pkt_len/data_len fields have been populated from the descriptors.
 * @return
 *   0 on success, -1 if the batch cannot be dequeued (consumer index not
 *   batch-aligned, batch would cross the ring end, or not all four
 *   descriptors are used yet).
 */
static inline int
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
                                   struct rte_mbuf **rx_pkts)
{
        struct virtqueue *vq = rxvq->vq;
        struct virtio_hw *hw = vq->hw;
        uint16_t head_size = hw->vtnet_hdr_size;
        uint16_t id = vq->vq_used_cons_idx;
        struct vring_packed_desc *p_desc;
        uint16_t i;

        /* Batch processing requires a batch-aligned consumer index. */
        if (id & PACKED_BATCH_MASK)
                return -1;

        /* The whole batch must fit before the ring wraps. */
        if (unlikely((id + PACKED_BATCH_SIZE) > vq->vq_nentries))
                return -1;

        /*
         * Map packed descriptor to mbuf fields.
         * Table-lookup (vqtbl1q_u8) masks: each byte selects a source byte
         * from the descriptor vector; 0xFF yields zero. shuf_msk1 picks the
         * length of the first descriptor in a 128-bit pair (descriptor
         * octets 1~0 = low 16 bits of the 32-bit len field), shuf_msk2 the
         * second (octets 9~8).
         */
        uint8x16_t shuf_msk1 = {
                0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
                0, 1,                   /* octet 1~0, low 16 bits pkt_len */
                0xFF, 0xFF,             /* skip high 16 bits of pkt_len, zero out */
                0, 1,                   /* octet 1~0, 16 bits data_len */
                0xFF, 0xFF,             /* vlan tci set as unknown */
                0xFF, 0xFF, 0xFF, 0xFF
        };

        uint8x16_t shuf_msk2 = {
                0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
                8, 9,                   /* octet 9~8, low 16 bits pkt_len */
                0xFF, 0xFF,             /* skip high 16 bits of pkt_len, zero out */
                8, 9,                   /* octet 9~8, 16 bits data_len */
                0xFF, 0xFF,             /* vlan tci set as unknown */
                0xFF, 0xFF, 0xFF, 0xFF
        };

        /*
         * Subtract the virtio-net header length, which the device counts in
         * the descriptor length but which is not packet payload.
         */
        uint16x8_t len_adjust = {
                0, 0,           /* ignore pkt_type field */
                head_size,      /* sub head_size on pkt_len */
                0,              /* ignore high 16 bits of pkt_len */
                head_size,      /* sub head_size on data_len */
                0, 0, 0         /* ignore non-length fields */
        };

        uint64x2_t desc[PACKED_BATCH_SIZE / 2];
        uint64x2x2_t mbp[PACKED_BATCH_SIZE / 2];
        uint64x2_t pkt_mb[PACKED_BATCH_SIZE];

        p_desc = &vq->vq_packed.ring.desc[id];
        /*
         * vld2q_u64 de-interleaves pairs of 64-bit lanes, so .val[1] gathers
         * the high 64 bits (len/id/flags) of two consecutive descriptors.
         */
        /* Load high 64 bits of packed descriptor 0,1. */
        desc[0] = vld2q_u64((uint64_t *)(p_desc)).val[1];
        /* Load high 64 bits of packed descriptor 2,3. */
        desc[1] = vld2q_u64((uint64_t *)(p_desc + 2)).val[1];

        /* Only care avail/used bits. */
        uint32x4_t v_mask = vdupq_n_u32(PACKED_FLAGS_MASK);
        /* Extract high 32 bits of packed descriptor (id, flags). */
        uint32x4_t v_desc = vuzp2q_u32(vreinterpretq_u32_u64(desc[0]),
                                vreinterpretq_u32_u64(desc[1]));
        uint32x4_t v_flag = vandq_u32(v_desc, v_mask);

        /*
         * A descriptor is "used" when its avail/used flag bits match the
         * current wrap counter: both set when the counter is 1, both clear
         * when it is 0.
         */
        uint32x4_t v_used_flag = vdupq_n_u32(0);
        if (vq->vq_packed.used_wrap_counter)
                v_used_flag = vdupq_n_u32(PACKED_FLAGS_MASK);

        /*
         * ~vceqq leaves all-ones in any lane whose flags do NOT match; a
         * nonzero 128-bit value therefore means at least one descriptor in
         * the batch is not yet used.
         */
        poly128_t desc_stats = vreinterpretq_p128_u32(~vceqq_u32(v_flag, v_used_flag));

        /* Check all descs are used. */
        if (desc_stats)
                return -1;

        /*
         * Load 2 mbuf pointers per time. The de-interleaving load assumes
         * the mbuf pointer is the first 64-bit field of each vq_descx
         * entry and that entries are two 64-bit words apart -- NOTE(review):
         * layout of struct vq_desc_extra not visible here, verify.
         */
        mbp[0] = vld2q_u64((uint64_t *)&vq->vq_descx[id]);
        vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0].val[0]);

        mbp[1] = vld2q_u64((uint64_t *)&vq->vq_descx[id + 2]);
        vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1].val[0]);

        /**
         *  Update data length and packet length for descriptor.
         *  structure of pkt_mb:
         *  --------------------------------------------------------------------
         *  |32 bits pkt_type|32 bits pkt_len|16 bits data_len|16 bits vlan_tci|
         *  --------------------------------------------------------------------
         */
        pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
                        vreinterpretq_u8_u64(desc[0]), shuf_msk1));
        pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
                        vreinterpretq_u8_u64(desc[0]), shuf_msk2));
        pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
                        vreinterpretq_u8_u64(desc[1]), shuf_msk1));
        pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
                        vreinterpretq_u8_u64(desc[1]), shuf_msk2));

        /* Strip the virtio-net header length from pkt_len and data_len. */
        pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
                        vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
        pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
                        vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
        pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
                        vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
        pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
                        vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));

        /* Write the assembled 128-bit field group into each mbuf. */
        vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1, pkt_mb[0]);
        vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1, pkt_mb[1]);
        vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1, pkt_mb[2]);
        vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1, pkt_mb[3]);

        if (hw->has_rx_offload) {
                virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                        /* The virtio-net header sits just before the packet
                         * data, inside the mbuf headroom.
                         */
                        char *addr = (char *)rx_pkts[i]->buf_addr +
                                RTE_PKTMBUF_HEADROOM - head_size;
                        virtio_vec_rx_offload(rx_pkts[i],
                                        (struct virtio_net_hdr *)addr);
                }
        }

        virtio_update_batch_stats(&rxvq->stats, rx_pkts[0]->pkt_len,
                        rx_pkts[1]->pkt_len, rx_pkts[2]->pkt_len,
                        rx_pkts[3]->pkt_len);

        vq->vq_free_cnt += PACKED_BATCH_SIZE;

        /* Advance the consumer index; flip the wrap counter on ring wrap. */
        vq->vq_used_cons_idx += PACKED_BATCH_SIZE;
        if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                vq->vq_used_cons_idx -= vq->vq_nentries;
                vq->vq_packed.used_wrap_counter ^= 1;
        }

        return 0;
}