drivers/net/hns3/hns3_rxtx_vec_sve.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Hisilicon Limited.
 */

#include <arm_sve.h>
#include <rte_io.h>
#include <rte_ethdev_driver.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_rxtx_vec.h"

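/*
 * Fixed-width predicates: svwhilelt_bN(0, cnt) enables the first cnt
 * N-bit lanes, so e.g. PG16_256BIT covers sixteen 16-bit lanes (256 bits)
 * regardless of the hardware SVE vector length.
 */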
#define PG16_128BIT             svwhilelt_b16(0, 8)
#define PG16_256BIT             svwhilelt_b16(0, 16)
#define PG32_256BIT             svwhilelt_b32(0, 8)
#define PG64_64BIT              svwhilelt_b64(0, 1)
#define PG64_128BIT             svwhilelt_b64(0, 2)
#define PG64_256BIT             svwhilelt_b64(0, 4)
#define PG64_ALLBIT             svptrue_b64()

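/*
 * Byte offsets of the fields used below within the 32-byte hns3 Rx
 * buffer descriptor (BD); the valid word at offset 28 carries
 * bd_base_info, including the HNS3_RXD_VLD_B bit.
 */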
#define BD_SIZE                 32
#define BD_FIELD_ADDR_OFFSET    0
#define BD_FIELD_L234_OFFSET    8
#define BD_FIELD_XLEN_OFFSET    12
#define BD_FIELD_RSS_OFFSET     16
#define BD_FIELD_OL_OFFSET      24
#define BD_FIELD_VALID_OFFSET   28

typedef struct {
        uint32_t l234_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
        uint32_t ol_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
        uint32_t bd_base_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
} HNS3_SVE_KEY_FIELD_S;

static inline uint32_t
hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
                          struct rte_mbuf **rx_pkts,
                          HNS3_SVE_KEY_FIELD_S *key,
                          uint32_t bd_vld_num)
{
        uint32_t retcode = 0;
        uint32_t cksum_err;
        int ret, i;

        for (i = 0; i < (int)bd_vld_num; i++) {
                /* init the last 64 bits of rte_mbuf.rearm_data */
                rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;

                ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
                                         key->l234_info[i], &cksum_err);
                if (unlikely(ret)) {
                        retcode |= 1u << i;
                        continue;
                }

                rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq,
                                        key->l234_info[i], key->ol_info[i]);
                if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B)))
                        hns3_rx_set_cksum_flag(rx_pkts[i],
                                        rx_pkts[i]->packet_type, cksum_err);
        }

        return retcode;
}

static inline void
hns3_rx_prefetch_mbuf_sve(struct hns3_entry *sw_ring)
{
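        /*
         * Load eight mbuf pointers from the software ring and issue a
         * gather prefetch on them, pulling the mbufs themselves into L1
         * before the scalar parse loop dereferences them.
         */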
        svuint64_t prf1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[0]);
        svuint64_t prf2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[4]);
        svprfd_gather_u64base(PG64_256BIT, prf1st, SV_PLDL1KEEP);
        svprfd_gather_u64base(PG64_256BIT, prf2st, SV_PLDL1KEEP);
}

static inline uint16_t
hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
                        struct rte_mbuf **__restrict rx_pkts,
                        uint16_t nb_pkts,
                        uint64_t *bd_err_mask)
{
#define XLEN_ADJUST_LEN         32
#define RSS_ADJUST_LEN          16
#define GEN_VLD_U8_ZIP_INDEX    svindex_s8(28, -4)
        uint16_t rx_id = rxq->next_to_use;
        struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id];
        struct hns3_desc *rxdp = &rxq->rx_ring[rx_id];
        struct hns3_desc *rxdp2;
        HNS3_SVE_KEY_FIELD_S key_field;
        uint64_t bd_valid_num;
        uint32_t parse_retcode;
        uint16_t nb_rx = 0;
        int pos, offset;

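        /*
         * svtbl index tables used below: each in-range index selects a
         * 16-bit (xlen) or 32-bit (rss) source lane, while the 0xffff
         * entries are out of range and therefore yield zero lanes. Per
         * mbuf, xlen_adjust builds a 64-bit value of { pkt_len
         * (zero-extended to 32 bits), data_len, 0 } that a single
         * scatter store later writes over pkt_len/data_len/vlan_tci.
         */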
        uint16_t xlen_adjust[XLEN_ADJUST_LEN] = {
                0,  0xffff, 1,  0xffff,    /* 1st mbuf: pkt_len and data_len */
                2,  0xffff, 3,  0xffff,    /* 2nd mbuf: pkt_len and data_len */
                4,  0xffff, 5,  0xffff,    /* 3rd mbuf: pkt_len and data_len */
                6,  0xffff, 7,  0xffff,    /* 4th mbuf: pkt_len and data_len */
                8,  0xffff, 9,  0xffff,    /* 5th mbuf: pkt_len and data_len */
                10, 0xffff, 11, 0xffff,    /* 6th mbuf: pkt_len and data_len */
                12, 0xffff, 13, 0xffff,    /* 7th mbuf: pkt_len and data_len */
                14, 0xffff, 15, 0xffff,    /* 8th mbuf: pkt_len and data_len */
        };

        uint32_t rss_adjust[RSS_ADJUST_LEN] = {
                0, 0xffff,        /* 1st mbuf: rss */
                1, 0xffff,        /* 2nd mbuf: rss */
                2, 0xffff,        /* 3rd mbuf: rss */
                3, 0xffff,        /* 4th mbuf: rss */
                4, 0xffff,        /* 5th mbuf: rss */
                5, 0xffff,        /* 6th mbuf: rss */
                6, 0xffff,        /* 7th mbuf: rss */
                7, 0xffff,        /* 8th mbuf: rss */
        };

        svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
        svuint16_t xlen_tbl1 = svld1_u16(PG16_256BIT, xlen_adjust);
        svuint16_t xlen_tbl2 = svld1_u16(PG16_256BIT, &xlen_adjust[16]);
        svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust);
        svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]);

        for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
                                     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
                svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init;
                svuint64_t xlen1st, xlen2st, rss1st, rss2st;
                svuint32_t l234, ol, vld, vld2, xlen, rss;
                svuint8_t  vld_u8;

                /* calc how many BDs are valid: part 1 */
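                /*
                 * Gather the 32-bit valid word of each of the eight
                 * descriptors, shift so HNS3_RXD_VLD_B lands in the
                 * sign bit, then arithmetic-shift right by 31: every
                 * lane becomes all-ones if its descriptor is valid and
                 * all-zeros otherwise.
                 */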
                vld = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp,
                        svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
                vld2 = svlsl_n_u32_z(pg32, vld,
                                    HNS3_UINT32_BIT - 1 - HNS3_RXD_VLD_B);
                vld2 = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
                        svreinterpret_s32_u32(vld2), HNS3_UINT32_BIT - 1));

                /* load 4 mbuf pointers */
                mbp1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos]);

                /* calc how many BDs are valid: part 2 */
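                /*
                 * GEN_VLD_U8_ZIP_INDEX (28, 24, ..., 0, then
                 * out-of-range values) packs one mask byte per
                 * descriptor into a single 64-bit lane, descriptor 0 in
                 * the most significant byte. Inverting and counting
                 * leading zero bits then yields 8 * (number of leading
                 * valid descriptors).
                 */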
                vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld2),
                                  svreinterpret_u8_s8(GEN_VLD_U8_ZIP_INDEX));
                vld_clz = svnot_u64_z(PG64_64BIT, svreinterpret_u64_u8(vld_u8));
                vld_clz = svclz_u64_z(PG64_64BIT, vld_clz);
                svst1_u64(PG64_64BIT, &bd_valid_num, vld_clz);
                bd_valid_num /= HNS3_UINT8_BIT;

                /* load 4 more mbuf pointers */
                mbp2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos + 4]);

                /* use offset to control the ordering of the data loads below */
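                /*
                 * rxq->offset_table maps bd_valid_num to a descriptor
                 * offset so that, when fewer than eight BDs are valid,
                 * the 8-wide gather loads through rxdp2 do not read
                 * past the last valid descriptor.
                 */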
                offset = rxq->offset_table[bd_valid_num];
                rxdp2 = rxdp + offset;

                /* store 4 mbuf pointers into rx_pkts */
                svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos], mbp1st);

                /* load key fields into vector regs */
                l234 = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_L234_OFFSET, BD_SIZE));
                ol = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_OL_OFFSET, BD_SIZE));

                /* store 4 more mbuf pointers into rx_pkts */
                svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos + 4], mbp2st);

                /* load data_len, pkt_len and rss_hash */
                xlen = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_XLEN_OFFSET, BD_SIZE));
                rss = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_RSS_OFFSET, BD_SIZE));

                /* store key fields to the stash buffer */
                svst1_u32(pg32, (uint32_t *)key_field.l234_info, l234);
                svst1_u32(pg32, (uint32_t *)key_field.bd_base_info, vld);
                svst1_u32(pg32, (uint32_t *)key_field.ol_info, ol);

                /* subtract crc_len from pkt_len and data_len */
                xlen = svreinterpret_u32_u16(svsub_n_u16_z(PG16_256BIT,
                        svreinterpret_u16_u32(xlen), rxq->crc_len));

                /* init mbuf_initializer */
                mbuf_init = svdup_n_u64(rxq->mbuf_initializer);

                /* extract data_len, pkt_len and rss from xlen and rss */
                xlen1st = svreinterpret_u64_u16(
                        svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl1));
                xlen2st = svreinterpret_u64_u16(
                        svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl2));
                rss1st = svreinterpret_u64_u32(
                        svtbl_u32(svreinterpret_u32_u32(rss), rss_tbl1));
                rss2st = svreinterpret_u64_u32(
                        svtbl_u32(svreinterpret_u32_u32(rss), rss_tbl2));

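                /*
                 * Each 64-bit scatter below writes one mbuf: rearm_data
                 * gets mbuf_initializer, and the single store at
                 * pkt_len covers pkt_len, data_len and vlan_tci, which
                 * the xlen tables laid out contiguously above.
                 */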
                /* save mbuf_initializer */
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
                        offsetof(struct rte_mbuf, rearm_data), mbuf_init);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
                        offsetof(struct rte_mbuf, rearm_data), mbuf_init);

                /* save data_len, pkt_len and rss */
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
                        offsetof(struct rte_mbuf, pkt_len), xlen1st);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
                        offsetof(struct rte_mbuf, hash.rss), rss1st);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
                        offsetof(struct rte_mbuf, pkt_len), xlen2st);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
                        offsetof(struct rte_mbuf, hash.rss), rss2st);

                rte_prefetch_non_temporal(rxdp +
                                          HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

                parse_retcode = hns3_desc_parse_field_sve(rxq, &rx_pkts[pos],
                                        &key_field, bd_valid_num);
                if (unlikely(parse_retcode))
                        (*bd_err_mask) |= ((uint64_t)parse_retcode) << pos;

                hns3_rx_prefetch_mbuf_sve(&sw_ring[pos +
                                        HNS3_SVE_DEFAULT_DESCS_PER_LOOP]);

                nb_rx += bd_valid_num;
                if (unlikely(bd_valid_num < HNS3_SVE_DEFAULT_DESCS_PER_LOOP))
                        break;
        }

        rxq->rx_rearm_nb += nb_rx;
        rxq->next_to_use += nb_rx;
        if (rxq->next_to_use >= rxq->nb_rx_desc)
                rxq->next_to_use = 0;

        return nb_rx;
}

static inline void
hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq)
{
#define REARM_LOOP_STEP_NUM     4
        struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
        struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
        struct hns3_entry *rxep_tmp = rxep;
        int i;

        if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                          HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
                return;
        }

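        /* stream-prefetch the freshly allocated mbufs, four at a time */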
        for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
                rxep_tmp += REARM_LOOP_STEP_NUM) {
                svuint64_t prf = svld1_u64(PG64_256BIT, (uint64_t *)rxep_tmp);
                svprfd_gather_u64base(PG64_256BIT, prf, SV_PLDL1STRM);
        }

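        /*
         * Rewrite four descriptors per iteration: scatter the buffer
         * IOVA (plus headroom) into each BD's addr field, and store a
         * zero 64-bit word at offset 24 to clear ol_info and
         * bd_base_info, resetting the VLD bit before the BDs are handed
         * back to hardware.
         */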
        for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
                rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
                uint64_t iova[REARM_LOOP_STEP_NUM];
                iova[0] = rxep[0].mbuf->buf_iova;
                iova[1] = rxep[1].mbuf->buf_iova;
                iova[2] = rxep[2].mbuf->buf_iova;
                iova[3] = rxep[3].mbuf->buf_iova;
                svuint64_t siova = svld1_u64(PG64_256BIT, iova);
                siova = svadd_n_u64_z(PG64_256BIT, siova, RTE_PKTMBUF_HEADROOM);
                svuint64_t ol_base = svdup_n_u64(0);
                svst1_scatter_u64offset_u64(PG64_256BIT,
                        (uint64_t *)&rxdp[0].addr,
                        svindex_u64(BD_FIELD_ADDR_OFFSET, BD_SIZE), siova);
                svst1_scatter_u64offset_u64(PG64_256BIT,
                        (uint64_t *)&rxdp[0].addr,
                        svindex_u64(BD_FIELD_OL_OFFSET, BD_SIZE), ol_base);
        }

        rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
        if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
                rxq->rx_rearm_start = 0;

        rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;

        hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
}

uint16_t
hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
                       struct rte_mbuf **__restrict rx_pkts,
                       uint16_t nb_pkts)
{
        struct hns3_rx_queue *rxq = rx_queue;
        struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
        uint64_t bd_err_mask;  /* bit mask indicating which packets are in error */
        uint16_t nb_rx;

        rte_prefetch_non_temporal(rxdp);

        nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

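        /* replenish the ring once enough descriptors have been consumed */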
        if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
                hns3_rxq_rearm_mbuf_sve(rxq);

        if (unlikely(!(rxdp->rx.bd_base_info &
                        rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B))))
                return 0;

        hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);

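        /*
         * On any per-BD error, hns3_rx_reassemble_pkts() compacts
         * rx_pkts, dropping the mbufs flagged in bd_err_mask.
         */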
        bd_err_mask = 0;
        nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts, &bd_err_mask);
        if (unlikely(bd_err_mask))
                nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);

        return nb_rx;
}