net/bnxt: modify ring index logic
drivers/net/bnxt/bnxt_rxtx_vec_neon.c
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2020 Broadcom All rights reserved. */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_rxtx_vec_common.h"

/*
 * RX Ring handling
 */

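/*
 * Compute ol_flags for the packet in lane 'pi' of the burst: the RSS-valid
 * flag is OR'd with a bnxt_ol_flags_table lookup driven by the per-packet
 * offload index, and checksum-error flags from bnxt_ol_flags_err_table are
 * OR'd in when the error index for that packet is non-zero.
 */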
#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags)                  \
{                                                                              \
        uint32_t tmp, of;                                                      \
                                                                               \
        of = vgetq_lane_u32((rss_flags), (pi)) |                               \
                   bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))];        \
                                                                               \
        tmp = vgetq_lane_u32((errors), (pi));                                  \
        if (tmp)                                                               \
                of |= bnxt_ol_flags_err_table[tmp];                            \
        (ol_flags) = of;                                                       \
}

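/*
 * Build the 16-byte rx_descriptor_fields1 region for the packet in lane
 * 'pkt_idx': pkt_len, data_len and the RSS hash are shuffled out of the base
 * completion, the packet type is looked up in bnxt_ptype_table, and the VLAN
 * TCI is taken from the second (high) completion.
 */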
#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret)      \
{                                                                              \
        uint32_t ptype;                                                        \
        uint16_t vlan_tci;                                                     \
        uint32x4_t r;                                                          \
                                                                               \
        /* Set mbuf pkt_len, data_len, and rss_hash fields. */                 \
        r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp),       \
                                              (shuf_msk)));                    \
                                                                               \
        /* Set packet type. */                                                 \
        ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))];      \
        r = vsetq_lane_u32(ptype, r, 0);                                       \
                                                                               \
        /* Set vlan_tci. */                                                    \
        vlan_tci = vgetq_lane_u32((rxcmp1), 1);                                \
        r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci,                     \
                                vreinterpretq_u16_u32(r), 5));                 \
        (ret) = r;                                                             \
}

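/*
 * Convert four completion descriptor pairs (rxcmp/rxcmp1) into the
 * rearm_data and rx_descriptor_fields1 regions of four mbufs.
 */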
static void
descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
               uint64x2_t mb_init, struct rte_mbuf **mbuf)
{
        const uint8x16_t shuf_msk = {
                0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
                2, 3, 0xFF, 0xFF,          /* pkt_len */
                2, 3,                      /* data_len */
                0xFF, 0xFF,                /* vlan_tci (zeroes) */
                12, 13, 14, 15             /* rss hash */
        };
        const uint32x4_t flags_type_mask =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
        const uint32x4_t flags2_mask1 =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
                            RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC);
        const uint32x4_t flags2_mask2 =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
        const uint32x4_t rss_mask =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS_RSS_VALID);
        const uint32x4_t flags2_index_mask = vdupq_n_u32(0x1F);
        const uint32x4_t flags2_error_mask = vdupq_n_u32(0x0F);
        uint32x4_t flags_type, flags2, index, errors, rss_flags;
        uint32x4_t tmp, ptype_idx;
        uint64x2_t t0, t1;
        uint32_t ol_flags;

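        /*
         * The vzip1q/vcombine sequences below gather the first 32-bit word
         * (flags_type) of each of the four base completions, and then the
         * first word (flags2) of each of the four high completions, into
         * single four-lane vectors so that the table indexes can be
         * computed for all four packets at once.
         */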
        /* Compute packet type table indexes for four packets */
        t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
        t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));

        flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                        vget_low_u64(t1)));
        ptype_idx =
                vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9);

        t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
        t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

        flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                    vget_low_u64(t1)));

        ptype_idx = vorrq_u32(ptype_idx,
                        vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2));
        ptype_idx = vorrq_u32(ptype_idx,
                        vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7));

        /* Extract RSS valid flags for four packets. */
        rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);

        flags2 = vandq_u32(flags2, flags2_index_mask);

        /* Extract errors_v2 fields for four packets. */
        t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
        t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

        errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                    vget_low_u64(t1)));

        /* Compute ol_flags and checksum error indexes for four packets. */
        errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
        errors = vandq_u32(errors, flags2);

        index = vbicq_u32(flags2, errors);
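        /*
         * 'errors' now indexes bnxt_ol_flags_err_table for packets with
         * checksum errors, while 'index' (flags2 with the error bits
         * cleared) indexes bnxt_ol_flags_table for the good-checksum flags.
         */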
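        /*
         * Each 16-byte store below covers the mbuf rearm_data region:
         * data_off, refcnt, nb_segs and port come from mbuf_initializer
         * (mb_init), while ol_flags is written into 32-bit lane 2, with
         * lane 3 left zero by mb_init.
         */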
        /* Update mbuf rearm_data for four packets. */
        GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[0]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[1]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[2]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[3]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));

        /* Update mbuf rx_descriptor_fields1 for four packets. */
        GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp);
        vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp);
        vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp);
        vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp);
        vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
}

uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
        uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
        struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
        uint64_t valid, desc_valid_mask = ~0UL;
        const uint32x4_t info3_v_mask = vdupq_n_u32(CMPL_BASE_V);
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons, mbcons;
        int nb_rx_pkts = 0;
        const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
        const uint32x4_t valid_target =
                vdupq_n_u32(!!(raw_cons & cp_ring_size));
        int i;

        /* If Rx Q was stopped return */
        if (unlikely(!rxq->rx_started))
                return 0;

        if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
                bnxt_rxq_rearm(rxq, rxr);

        /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
        nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);

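        /*
         * Each received packet consumes two completion ring entries (a base
         * and a high completion), so the mbuf ring consumer advances at half
         * the rate of the raw completion consumer.
         */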
        cons = raw_cons & (cp_ring_size - 1);
        mbcons = (raw_cons / 2) & (rx_ring_size - 1);

        /* Prefetch first four descriptor pairs. */
        rte_prefetch0(&cp_desc_ring[cons]);
        rte_prefetch0(&cp_desc_ring[cons + 4]);

        /* Ensure that we do not go past the ends of the rings. */
        nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
                                           (cp_ring_size - cons) / 2));
        /*
         * If we are at the end of the ring, ensure that descriptors after the
         * last valid entry are not treated as valid. Otherwise, force the
         * maximum number of packets to receive to be a multiple of the per-
         * loop count.
         */
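        /*
         * For example, with nb_pkts == 2 the shift below leaves
         * desc_valid_mask == 0xffffffff, so only the two low 16-bit lanes
         * of 'valid' can be counted as valid descriptors.
         */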
        if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
                desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
        else
                nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);

        /* Handle RX burst request */
        for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
                                  cons += RTE_BNXT_DESCS_PER_LOOP * 2,
                                  mbcons += RTE_BNXT_DESCS_PER_LOOP) {
                uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
                uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
                uint32x4_t info3_v;
                uint64x2_t t0, t1;
                uint32_t num_valid;

                /* Copy four mbuf pointers to output array. */
                t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
#ifdef RTE_ARCH_ARM64
                t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
#endif
                vst1q_u64((void *)&rx_pkts[i], t0);
#ifdef RTE_ARCH_ARM64
                vst1q_u64((void *)&rx_pkts[i + 2], t1);
#endif
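                /*
                 * Two 128-bit copies are needed above on 64-bit ARM, where
                 * an mbuf pointer is 8 bytes; on 32-bit ARM the first copy
                 * alone moves all four pointers.
                 */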

                /* Prefetch four descriptor pairs for next iteration. */
                if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
                        rte_prefetch0(&cp_desc_ring[cons + 8]);
                        rte_prefetch0(&cp_desc_ring[cons + 12]);
                }

                /*
                 * Load the four current descriptors into NEON registers in
                 * reverse order to ensure consistent state.
                 */
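                /*
                 * The second (high) completion of each pair carries the
                 * valid indication, so it is loaded first and an I/O read
                 * barrier keeps the load of the first (low) completion from
                 * being reordered ahead of it.
                 */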
                rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
                rte_io_rmb();
                rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);

                rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
                rte_io_rmb();
                rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);

                t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));

                rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
                rte_io_rmb();
                rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);

                rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
                rte_io_rmb();
                rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);

                t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));

                /* Isolate descriptor status flags. */
                info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                             vget_low_u64(t1)));
                info3_v = vandq_u32(info3_v, info3_v_mask);
                info3_v = veorq_u32(info3_v, valid_target);
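                /*
                 * valid_target holds the phase bit expected for this pass
                 * through the completion ring; after the XOR, a non-zero
                 * lane indicates a completion pair that has been written
                 * for the current pass.
                 */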

                /*
                 * Pack the 128-bit array of valid descriptor flags into 64
                 * bits so that the number of valid descriptors can be
                 * determined below.
                 */
                valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
                                      0);
                /*
                 * At this point, 'valid' is a 64-bit value containing four
                 * 16-bit fields, each of which is either 0x0001 or 0x0000.
                 * Compute number of valid descriptors from the index of
                 * the highest non-zero field.
                 */
                num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
                                (__builtin_clzl(valid & desc_valid_mask) / 16);
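                /*
                 * For example, if lanes 0-2 are valid, valid ==
                 * 0x0000000100010001, __builtin_clzl() returns 31, and
                 * num_valid == 4 - 31 / 16 == 3.
                 */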
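                /*
                 * Clear the software ring entries for the mbufs just handed
                 * to the caller; these slots are refilled later by
                 * bnxt_rxq_rearm() once rxrearm_nb crosses the free
                 * threshold.
                 */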
                switch (num_valid) {
                case 4:
                        rxr->rx_buf_ring[mbcons + 3] = NULL;
                        /* FALLTHROUGH */
                case 3:
                        rxr->rx_buf_ring[mbcons + 2] = NULL;
                        /* FALLTHROUGH */
                case 2:
                        rxr->rx_buf_ring[mbcons + 1] = NULL;
                        /* FALLTHROUGH */
                case 1:
                        rxr->rx_buf_ring[mbcons + 0] = NULL;
                        break;
                case 0:
                        goto out;
                }

                descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]);
                nb_rx_pkts += num_valid;

                if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
                        break;
        }

out:
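        /*
         * The Rx producer advances by one buffer per packet delivered,
         * while the completion consumer advances by two entries per packet
         * (one completion pair each).
         */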
        if (nb_rx_pkts) {
                rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);

                rxq->rxrearm_nb += nb_rx_pkts;
                cpr->cp_raw_cons += 2 * nb_rx_pkts;
                cpr->valid =
                        !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
                bnxt_db_cq(cpr);
        }

        return nb_rx_pkts;
}

static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
        struct bnxt_cp_ring_info *cpr = txq->cp_ring;
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons;
        uint32_t nb_tx_pkts = 0;
        struct tx_cmpl *txcmp;
        struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
        struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
        uint32_t ring_mask = cp_ring_struct->ring_mask;

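        /*
         * Walk the completion ring, accumulating the per-burst packet
         * counts carried in the opaque field of each Tx completion, until
         * an entry that has not yet been written by hardware is found.
         */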
        do {
                cons = RING_CMPL(ring_mask, raw_cons);
                txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

                if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
                        break;

                if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
                        nb_tx_pkts += txcmp->opaque;
                else
                        RTE_LOG_DP(ERR, PMD,
                                   "Unhandled CMP type %02x\n",
                                   CMP_TYPE(txcmp));
                raw_cons = NEXT_RAW_CMP(raw_cons);
        } while (nb_tx_pkts < ring_mask);

        cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
        if (nb_tx_pkts) {
                if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
                else
                        bnxt_tx_cmp_vec(txq, nb_tx_pkts);
                cpr->cp_raw_cons = raw_cons;
                bnxt_db_cq(cpr);
        }
}

static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
{
        struct bnxt_tx_queue *txq = tx_queue;
        struct bnxt_tx_ring_info *txr = txq->tx_ring;
        uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
        struct rte_mbuf *tx_mbuf;
        struct tx_bd_long *txbd = NULL;
        struct bnxt_sw_tx_bd *tx_buf;
        uint16_t to_send;

        nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

        if (unlikely(nb_pkts == 0))
                return 0;

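        /*
         * Each packet uses a single long Tx BD with completions suppressed;
         * only the last BD of the burst requests a completion, and its
         * opaque field carries the burst size so that one completion
         * accounts for the whole burst.
         */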
        /* Handle TX burst request */
        to_send = nb_pkts;
        while (to_send) {
                tx_mbuf = *tx_pkts++;
                rte_prefetch0(tx_mbuf);

                tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
                tx_buf = &txr->tx_buf_ring[tx_prod];
                tx_buf->mbuf = tx_mbuf;
                tx_buf->nr_bds = 1;

                txbd = &txr->tx_desc_ring[tx_prod];
                txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
                txbd->len = tx_mbuf->data_len;
                txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
                                                       TX_BD_FLAGS_NOCMPL);
                tx_raw_prod = RING_NEXT(tx_raw_prod);
                to_send--;
        }

        /* Request a completion for last packet in burst */
        if (txbd) {
                txbd->opaque = nb_pkts;
                txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
        }

        rte_compiler_barrier();
        bnxt_db_write(&txr->tx_db, tx_raw_prod);

        txr->tx_raw_prod = tx_raw_prod;

        return nb_pkts;
}

uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                   uint16_t nb_pkts)
{
        int nb_sent = 0;
        struct bnxt_tx_queue *txq = tx_queue;

        /* Tx queue was stopped; wait for it to be restarted */
        if (unlikely(!txq->tx_started)) {
                PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
                return 0;
        }

        /* Handle TX completions */
        if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
                bnxt_handle_tx_cp_vec(txq);

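        /*
         * Transmit in chunks of at most RTE_BNXT_MAX_TX_BURST packets and
         * stop early if the ring fills (a chunk is only partially sent).
         */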
        while (nb_pkts) {
                uint16_t ret, num;

                num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
                ret = bnxt_xmit_fixed_burst_vec(tx_queue,
                                                &tx_pkts[nb_sent],
                                                num);
                nb_sent += ret;
                nb_pkts -= ret;
                if (ret < num)
                        break;
        }

        return nb_sent;
}

int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
        return bnxt_rxq_vec_setup_common(rxq);
}