net/cnxk: add vector Tx for CN9K
[dpdk.git] / drivers / net / cnxk / cn9k_tx.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #ifndef __CN9K_TX_H__
5 #define __CN9K_TX_H__
6
7 #include <rte_vect.h>
8
9 #define NIX_TX_OFFLOAD_NONE           (0)
10 #define NIX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(0)
11 #define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
12 #define NIX_TX_OFFLOAD_VLAN_QINQ_F    BIT(2)
13 #define NIX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)
14 #define NIX_TX_OFFLOAD_TSO_F          BIT(4)
15
16 /* Flags to control xmit_prepare function.
17  * Defined from the MSB end (backwards) to denote that it is
18  * not used as an offload flag to pick the Tx function.
19  */
20 #define NIX_TX_MULTI_SEG_F BIT(15)
21
22 #define NIX_TX_NEED_SEND_HDR_W1                                                \
23         (NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |         \
24          NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
25
26 #define NIX_TX_NEED_EXT_HDR                                                    \
27         (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
28
29 #define NIX_XMIT_FC_OR_RETURN(txq, pkts)                                       \
30         do {                                                                   \
31                 /* Cached value is low, update fc_cache_pkts */                \
32                 if (unlikely((txq)->fc_cache_pkts < (pkts))) {                 \
33                         /* Multiply by SQEs per SQB to express in pkts */      \
34                         (txq)->fc_cache_pkts =                                 \
35                                 ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem)      \
36                                 << (txq)->sqes_per_sqb_log2;                   \
37                         /* Check again for room */                             \
38                         if (unlikely((txq)->fc_cache_pkts < (pkts)))           \
39                                 return 0;                                      \
40                 }                                                              \
41         } while (0)
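/*
 * Illustrative note (not part of the driver logic): the refreshed cache value
 * expresses the free SQB room in packets as
 *     fc_cache_pkts = (nb_sqb_bufs_adj - *fc_mem) << sqes_per_sqb_log2
 * e.g. with nb_sqb_bufs_adj = 512, *fc_mem = 500 and 32 SQEs per SQB
 * (sqes_per_sqb_log2 = 5), the cache is refreshed to 12 << 5 = 384 packets.
 */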
42
43 /* Function to determine the number of Tx subdescriptors required when
44  * the extended subdescriptor is enabled.
45  */
46 static __rte_always_inline int
47 cn9k_nix_tx_ext_subs(const uint16_t flags)
48 {
49         return (flags &
50                 (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ? 1 : 0;
51 }
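/*
 * Resulting LMT command layout (derived from the cmd[] offsets used in
 * cn9k_nix_xmit_prepare() below):
 *   with ext subdesc:    cmd[0..1] SEND_HDR, cmd[2..3] SEND_EXT, cmd[4..5] SEND_SG
 *   without ext subdesc: cmd[0..1] SEND_HDR, cmd[2..3] SEND_SG
 */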
52
53 static __rte_always_inline void
54 cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
55 {
56         uint64_t mask, ol_flags = m->ol_flags;
57
58         if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
59                 uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
60                 uint16_t *iplen, *oiplen, *oudplen;
61                 uint16_t lso_sb, paylen;
62
63                 mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
64                 lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
65                          m->l2_len + m->l3_len + m->l4_len;
66
67                 /* Derive payload length by excluding the header bytes */
68                 paylen = m->pkt_len - lso_sb;
69
70                 /* Get iplen position assuming no tunnel hdr */
71                 iplen = (uint16_t *)(mdata + m->l2_len +
72                                      (2 << !!(ol_flags & PKT_TX_IPV6)));
73                 /* Handle tunnel tso */
74                 if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
75                     (ol_flags & PKT_TX_TUNNEL_MASK)) {
76                         const uint8_t is_udp_tun =
77                                 (CNXK_NIX_UDP_TUN_BITMASK >>
78                                  ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
79                                 0x1;
80
81                         oiplen = (uint16_t *)(mdata + m->outer_l2_len +
82                                               (2 << !!(ol_flags &
83                                                        PKT_TX_OUTER_IPV6)));
84                         *oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
85                                                    paylen);
86
87                         /* Update format for UDP tunneled packet */
88                         if (is_udp_tun) {
89                                 oudplen = (uint16_t *)(mdata + m->outer_l2_len +
90                                                        m->outer_l3_len + 4);
91                                 *oudplen = rte_cpu_to_be_16(
92                                         rte_be_to_cpu_16(*oudplen) - paylen);
93                         }
94
95                         /* Update iplen position to inner ip hdr */
96                         iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
97                                              m->l4_len +
98                                              (2 << !!(ol_flags & PKT_TX_IPV6)));
99                 }
100
101                 *iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
102         }
103 }
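/*
 * Worked example (illustrative values): for a plain TCP/IPv4 TSO packet with
 * l2_len = 14, l3_len = 20, l4_len = 20 and pkt_len = 1554, lso_sb = 54 and
 * paylen = 1500, so the IPv4 total-length field at mdata + l2_len + 2 is
 * reduced by 1500 before the HW LSO engine resegments the payload.
 */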
104
105 static __rte_always_inline void
106 cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
107                       const uint64_t lso_tun_fmt)
108 {
109         struct nix_send_ext_s *send_hdr_ext;
110         struct nix_send_hdr_s *send_hdr;
111         uint64_t ol_flags = 0, mask;
112         union nix_send_hdr_w1_u w1;
113         union nix_send_sg_s *sg;
114
115         send_hdr = (struct nix_send_hdr_s *)cmd;
116         if (flags & NIX_TX_NEED_EXT_HDR) {
117                 send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
118                 sg = (union nix_send_sg_s *)(cmd + 4);
119                 /* Clear previous markings */
120                 send_hdr_ext->w0.lso = 0;
121                 send_hdr_ext->w1.u = 0;
122         } else {
123                 sg = (union nix_send_sg_s *)(cmd + 2);
124         }
125
126         if (flags & NIX_TX_NEED_SEND_HDR_W1) {
127                 ol_flags = m->ol_flags;
128                 w1.u = 0;
129         }
130
131         if (!(flags & NIX_TX_MULTI_SEG_F)) {
132                 send_hdr->w0.total = m->data_len;
133                 send_hdr->w0.aura =
134                         roc_npa_aura_handle_to_aura(m->pool->pool_id);
135         }
136
137         /*
138          * L3type:  2 => IPV4
139          *          3 => IPV4 with csum
140          *          4 => IPV6
141          * L3type and L3ptr need to be set for either
142          * L3 csum, L4 csum or LSO
143          *
144          */
145
146         if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
147             (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
148                 const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
149                 const uint8_t ol3type =
150                         ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
151                         ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
152                         !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
153
154                 /* Outer L3 */
155                 w1.ol3type = ol3type;
156                 mask = 0xffffull << ((!!ol3type) << 4);
157                 w1.ol3ptr = ~mask & m->outer_l2_len;
158                 w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);
159
160                 /* Outer L4 */
161                 w1.ol4type = csum + (csum << 1);
162
163                 /* Inner L3 */
164                 w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
165                              ((!!(ol_flags & PKT_TX_IPV6)) << 2);
166                 w1.il3ptr = w1.ol4ptr + m->l2_len;
167                 w1.il4ptr = w1.il3ptr + m->l3_len;
168                 /* Increment by 1 for IPv4, as type 3 is IPv4 with csum */
169                 w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
170
171                 /* Inner L4 */
172                 w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
173
174                 /* In case of no tunnel header, shift the IL3/IL4
175                  * fields down so that OL3/OL4 carry the header
176                  * checksum information.
177                  */
178                 mask = !ol3type;
179                 w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |
180                        ((w1.u & 0x00000000FFFFFFFF) >> (mask << 4));
181
182         } else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
183                 const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
184                 const uint8_t outer_l2_len = m->outer_l2_len;
185
186                 /* Outer L3 */
187                 w1.ol3ptr = outer_l2_len;
188                 w1.ol4ptr = outer_l2_len + m->outer_l3_len;
189                 /* Increment by 1 for IPv4, as type 3 is IPv4 with csum */
190                 w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
191                              ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
192                              !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
193
194                 /* Outer L4 */
195                 w1.ol4type = csum + (csum << 1);
196
197         } else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {
198                 const uint8_t l2_len = m->l2_len;
199
200                 /* Always use OLXPTR and OLXTYPE when only
201                  * one header is present.
202                  */
203
204                 /* Inner L3 */
205                 w1.ol3ptr = l2_len;
206                 w1.ol4ptr = l2_len + m->l3_len;
207                 /* Increment by 1 for IPv4, as type 3 is IPv4 with csum */
208                 w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
209                              ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
210                              !!(ol_flags & PKT_TX_IP_CKSUM);
211
212                 /* Inner L4 */
213                 w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
214         }
215
216         if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
217                 send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
218                 /* HW will update ptr after vlan0 update */
219                 send_hdr_ext->w1.vlan1_ins_ptr = 12;
220                 send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
221
222                 send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
223                 /* 2B before end of l2 header */
224                 send_hdr_ext->w1.vlan0_ins_ptr = 12;
225                 send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
226         }
227
228         if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
229                 uint16_t lso_sb;
230                 uint64_t mask;
231
232                 mask = -(!w1.il3type);
233                 lso_sb = (mask & w1.ol4ptr) + (~mask & w1.il4ptr) + m->l4_len;
234
235                 send_hdr_ext->w0.lso_sb = lso_sb;
236                 send_hdr_ext->w0.lso = 1;
237                 send_hdr_ext->w0.lso_mps = m->tso_segsz;
238                 send_hdr_ext->w0.lso_format =
239                         NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
240                 w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
241
242                 /* Handle tunnel tso */
243                 if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
244                     (ol_flags & PKT_TX_TUNNEL_MASK)) {
245                         const uint8_t is_udp_tun =
246                                 (CNXK_NIX_UDP_TUN_BITMASK >>
247                                  ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
248                                 0x1;
249                         uint8_t shift = is_udp_tun ? 32 : 0;
250
251                         shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
252                         shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
253
254                         w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
255                         w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
256                         /* Update format for UDP tunneled packet */
257                         send_hdr_ext->w0.lso_format = (lso_tun_fmt >> shift);
258                 }
259         }
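        /* Note (assumption about the queue-setup packing): the shift above
         * selects one of eight 8-bit LSO format indices packed into
         * txq->lso_tun_fmt, keyed by UDP vs IP tunnel and by outer/inner
         * IPv4 vs IPv6.
         */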
260
261         if (flags & NIX_TX_NEED_SEND_HDR_W1)
262                 send_hdr->w1.u = w1.u;
263
264         if (!(flags & NIX_TX_MULTI_SEG_F)) {
265                 sg->seg1_size = m->data_len;
266                 *(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
267
268                 if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
269                         /* DF bit = 1 if refcount of current mbuf or parent mbuf
270                          *              is greater than 1
271                          * DF bit = 0 otherwise
272                          */
273                         send_hdr->w0.df = cnxk_nix_prefree_seg(m);
274                         /* Ensuring mbuf fields which got updated in
275                          * cnxk_nix_prefree_seg are written before LMTST.
276                          */
277                         rte_io_wmb();
278                 }
279                 /* Mark mempool object as "put" since it is freed by NIX */
280                 if (!send_hdr->w0.df)
281                         __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
282         }
283 }
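/*
 * Illustrative sketch of the prepared command for a single-segment mbuf with
 * only inner checksum offload (no ext header):
 *   cmd[0] = SEND_HDR_W0 (total = data_len, aura, df; remaining fields come
 *            from the txq command template copied in by the caller)
 *   cmd[1] = SEND_HDR_W1 (ol3ptr/ol4ptr/ol3type/ol4type built above)
 *   cmd[2] = SEND_SG_W0  (seg1_size = data_len)
 *   cmd[3] = iova of the packet data
 * which cn9k_nix_xmit_one() then copies to the LMT line and submits.
 */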
284
285 static __rte_always_inline void
286 cn9k_nix_xmit_one(uint64_t *cmd, void *lmt_addr, const rte_iova_t io_addr,
287                   const uint32_t flags)
288 {
289         uint64_t lmt_status;
290
291         do {
292                 roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
293                 lmt_status = roc_lmt_submit_ldeor(io_addr);
294         } while (lmt_status == 0);
295 }
296
297 static __rte_always_inline void
298 cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags)
299 {
300         roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
301 }
302
303 static __rte_always_inline uint64_t
304 cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr)
305 {
306         return roc_lmt_submit_ldeor(io_addr);
307 }
308
309 static __rte_always_inline uint64_t
310 cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)
311 {
312         return roc_lmt_submit_ldeorl(io_addr);
313 }
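/*
 * Illustrative usage (mirrors cn9k_nix_xmit_one()): a failed LDEOR discards
 * the prepared LMT lines, so the prep/submit pair is retried together:
 *
 *      do {
 *              cn9k_nix_xmit_prep_lmt(cmd, lmt_addr, flags);
 *      } while (cn9k_nix_xmit_submit_lmt(io_addr) == 0);
 */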
314
315 static __rte_always_inline uint16_t
316 cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
317 {
318         struct nix_send_hdr_s *send_hdr;
319         union nix_send_sg_s *sg;
320         struct rte_mbuf *m_next;
321         uint64_t *slist, sg_u;
322         uint64_t nb_segs;
323         uint64_t segdw;
324         uint8_t off, i;
325
326         send_hdr = (struct nix_send_hdr_s *)cmd;
327         send_hdr->w0.total = m->pkt_len;
328         send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
329
330         if (flags & NIX_TX_NEED_EXT_HDR)
331                 off = 2;
332         else
333                 off = 0;
334
335         sg = (union nix_send_sg_s *)&cmd[2 + off];
336         /* Clear sg->u header before use */
337         sg->u &= 0xFC00000000000000;
338         sg_u = sg->u;
339         slist = &cmd[3 + off];
340
341         i = 0;
342         nb_segs = m->nb_segs;
343
344         /* Fill mbuf segments */
345         do {
346                 m_next = m->next;
347                 sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
348                 *slist = rte_mbuf_data_iova(m);
349                 /* Set invert df if buffer is not to be freed by H/W */
350                 if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
351                         sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55));
352                         /* Commit changes to mbuf */
353                         rte_io_wmb();
354                 }
355                 /* Mark mempool object as "put" since it is freed by NIX */
356 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
357                 if (!(sg_u & (1ULL << (i + 55))))
358                         __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
359                 rte_io_wmb();
360 #endif
361                 slist++;
362                 i++;
363                 nb_segs--;
364                 if (i > 2 && nb_segs) {
365                         i = 0;
366                         /* Next SG subdesc */
367                         *(uint64_t *)slist = sg_u & 0xFC00000000000000;
368                         sg->u = sg_u;
369                         sg->segs = 3;
370                         sg = (union nix_send_sg_s *)slist;
371                         sg_u = sg->u;
372                         slist++;
373                 }
374                 m = m_next;
375         } while (nb_segs);
376
377         sg->u = sg_u;
378         sg->segs = i;
379         segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
380         /* Round up SG words to 16B dwords (multiple of 2) */
381         segdw = (segdw >> 1) + (segdw & 0x1);
382         /* Default dwords */
383         segdw += (off >> 1) + 1;
384         send_hdr->w0.sizem1 = segdw - 1;
385
386         return segdw;
387 }
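/*
 * Worked example (illustrative): with NIX_TX_NEED_EXT_HDR and a 4-segment
 * mbuf, the SG area holds SG hdr + 3 iovas + SG hdr + 1 iova = 6 words,
 * i.e. 3 (16B) dwords after round-up; adding one dword each for SEND_HDR
 * and SEND_EXT gives segdw = 5 and w0.sizem1 = 4.
 */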
388
389 static __rte_always_inline void
390 cn9k_nix_xmit_mseg_prep_lmt(uint64_t *cmd, void *lmt_addr, uint16_t segdw)
391 {
392         roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
393 }
394
395 static __rte_always_inline void
396 cn9k_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr, rte_iova_t io_addr,
397                        uint16_t segdw)
398 {
399         uint64_t lmt_status;
400
401         do {
402                 roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
403                 lmt_status = roc_lmt_submit_ldeor(io_addr);
404         } while (lmt_status == 0);
405 }
406
407 static __rte_always_inline void
408 cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
409                                rte_iova_t io_addr, uint16_t segdw)
410 {
411         uint64_t lmt_status;
412
413         rte_io_wmb();
414         do {
415                 roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
416                 lmt_status = roc_lmt_submit_ldeor(io_addr);
417         } while (lmt_status == 0);
418 }
419
420 static __rte_always_inline uint16_t
421 cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
422                    uint64_t *cmd, const uint16_t flags)
423 {
424         struct cn9k_eth_txq *txq = tx_queue;
425         const rte_iova_t io_addr = txq->io_addr;
426         void *lmt_addr = txq->lmt_addr;
427         uint64_t lso_tun_fmt;
428         uint16_t i;
429
430         NIX_XMIT_FC_OR_RETURN(txq, pkts);
431
432         roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));
433
434         /* Perform header writes before barrier for TSO */
435         if (flags & NIX_TX_OFFLOAD_TSO_F) {
436                 lso_tun_fmt = txq->lso_tun_fmt;
437
438                 for (i = 0; i < pkts; i++)
439                         cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
440         }
441
442         /* Commit any changes to the packet here; no further changes will be
443          * made unless fast free is disabled and the barrier follows prefree_seg().
444          */
445         if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
446                 rte_io_wmb();
447
448         for (i = 0; i < pkts; i++) {
449                 cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
450                 cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
451         }
452
453         /* Reduce the cached count */
454         txq->fc_cache_pkts -= pkts;
455
456         return pkts;
457 }
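/*
 * Illustrative instantiation (hypothetical name): this always-inline helper
 * is normally wrapped with a compile-time offload mask and an on-stack
 * command buffer, e.g.:
 *
 *      static uint16_t
 *      cn9k_nix_xmit_pkts_csum(void *tx_queue, struct rte_mbuf **tx_pkts,
 *                              uint16_t pkts)
 *      {
 *              uint64_t cmd[8];
 *
 *              return cn9k_nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd,
 *                                        NIX_TX_OFFLOAD_L3_L4_CSUM_F);
 *      }
 */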
458
459 static __rte_always_inline uint16_t
460 cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
461                         uint16_t pkts, uint64_t *cmd, const uint16_t flags)
462 {
463         struct cn9k_eth_txq *txq = tx_queue;
464         const rte_iova_t io_addr = txq->io_addr;
465         void *lmt_addr = txq->lmt_addr;
466         uint64_t lso_tun_fmt;
467         uint16_t segdw;
468         uint64_t i;
469
470         NIX_XMIT_FC_OR_RETURN(txq, pkts);
471
472         roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));
473
474         /* Perform header writes before barrier for TSO */
475         if (flags & NIX_TX_OFFLOAD_TSO_F) {
476                 lso_tun_fmt = txq->lso_tun_fmt;
477
478                 for (i = 0; i < pkts; i++)
479                         cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
480         }
481
482         /* Commit any changes to the packet here; no further changes will be
483          * made unless fast free is disabled and the barrier follows prefree_seg().
484          */
485         if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
486                 rte_io_wmb();
487
488         for (i = 0; i < pkts; i++) {
489                 cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
490                 segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
491                 cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
492         }
493
494         /* Reduce the cached count */
495         txq->fc_cache_pkts -= pkts;
496
497         return pkts;
498 }
499
500 #if defined(RTE_ARCH_ARM64)
501
502 #define NIX_DESCS_PER_LOOP 4
503 static __rte_always_inline uint16_t
504 cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
505                           uint16_t pkts, uint64_t *cmd, const uint16_t flags)
506 {
507         uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
508         uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
509         uint64x2_t cmd0[NIX_DESCS_PER_LOOP], cmd1[NIX_DESCS_PER_LOOP];
510         uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3;
511         uint64x2_t senddesc01_w0, senddesc23_w0;
512         uint64x2_t senddesc01_w1, senddesc23_w1;
513         uint64x2_t sgdesc01_w0, sgdesc23_w0;
514         uint64x2_t sgdesc01_w1, sgdesc23_w1;
515         struct cn9k_eth_txq *txq = tx_queue;
516         uint64_t *lmt_addr = txq->lmt_addr;
517         rte_iova_t io_addr = txq->io_addr;
518         uint64x2_t ltypes01, ltypes23;
519         uint64x2_t xtmp128, ytmp128;
520         uint64x2_t xmask01, xmask23;
521         uint64_t lmt_status, i;
522         uint16_t pkts_left;
523
524         NIX_XMIT_FC_OR_RETURN(txq, pkts);
525
526         pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
527         pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
528
529         /* Reduce the cached count */
530         txq->fc_cache_pkts -= pkts;
531
532         /* Commit any changes to the packet here; no further changes will be
533          * made unless fast free is disabled and the barrier follows prefree_seg().
534          */
535         if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
536                 rte_io_wmb();
537
538         senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
539         senddesc23_w0 = senddesc01_w0;
540         senddesc01_w1 = vdupq_n_u64(0);
541         senddesc23_w1 = senddesc01_w1;
542         sgdesc01_w0 = vld1q_dup_u64(&txq->cmd[2]);
543         sgdesc23_w0 = sgdesc01_w0;
544
545         for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
546                 /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
547                 senddesc01_w0 =
548                         vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
549                 sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
550
551                 senddesc23_w0 = senddesc01_w0;
552                 sgdesc23_w0 = sgdesc01_w0;
553
554                 /* Move mbufs to iova */
555                 mbuf0 = (uint64_t *)tx_pkts[0];
556                 mbuf1 = (uint64_t *)tx_pkts[1];
557                 mbuf2 = (uint64_t *)tx_pkts[2];
558                 mbuf3 = (uint64_t *)tx_pkts[3];
559
560                 mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
561                                      offsetof(struct rte_mbuf, buf_iova));
562                 mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
563                                      offsetof(struct rte_mbuf, buf_iova));
564                 mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
565                                      offsetof(struct rte_mbuf, buf_iova));
566                 mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
567                                      offsetof(struct rte_mbuf, buf_iova));
568                 /*
569                  * Get mbuf's, olflags, iova, pktlen, dataoff
570                  * dataoff_iovaX.D[0] = iova,
571                  * dataoff_iovaX.D[1](15:0) = mbuf->dataoff
572                  * len_olflagsX.D[0] = ol_flags,
573                  * len_olflagsX.D[1](63:32) = mbuf->pkt_len
574                  */
575                 dataoff_iova0 = vld1q_u64(mbuf0);
576                 len_olflags0 = vld1q_u64(mbuf0 + 2);
577                 dataoff_iova1 = vld1q_u64(mbuf1);
578                 len_olflags1 = vld1q_u64(mbuf1 + 2);
579                 dataoff_iova2 = vld1q_u64(mbuf2);
580                 len_olflags2 = vld1q_u64(mbuf2 + 2);
581                 dataoff_iova3 = vld1q_u64(mbuf3);
582                 len_olflags3 = vld1q_u64(mbuf3 + 2);
583
584                 /* Move mbufs to point to pool */
585                 mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
586                                      offsetof(struct rte_mbuf, pool) -
587                                      offsetof(struct rte_mbuf, buf_iova));
588                 mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
589                                      offsetof(struct rte_mbuf, pool) -
590                                      offsetof(struct rte_mbuf, buf_iova));
591                 mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
592                                      offsetof(struct rte_mbuf, pool) -
593                                      offsetof(struct rte_mbuf, buf_iova));
594                 mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
595                                      offsetof(struct rte_mbuf, pool) -
596                                      offsetof(struct rte_mbuf, buf_iova));
597
598                 if (flags & (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
599                              NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
600                         /* Get tx_offload for ol2, ol3, l2, l3 lengths */
601                         /*
602                          * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
603                          * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
604                          */
605
606                         asm volatile("LD1 {%[a].D}[0],[%[in]]\n\t"
607                                      : [a] "+w"(senddesc01_w1)
608                                      : [in] "r"(mbuf0 + 2)
609                                      : "memory");
610
611                         asm volatile("LD1 {%[a].D}[1],[%[in]]\n\t"
612                                      : [a] "+w"(senddesc01_w1)
613                                      : [in] "r"(mbuf1 + 2)
614                                      : "memory");
615
616                         asm volatile("LD1 {%[b].D}[0],[%[in]]\n\t"
617                                      : [b] "+w"(senddesc23_w1)
618                                      : [in] "r"(mbuf2 + 2)
619                                      : "memory");
620
621                         asm volatile("LD1 {%[b].D}[1],[%[in]]\n\t"
622                                      : [b] "+w"(senddesc23_w1)
623                                      : [in] "r"(mbuf3 + 2)
624                                      : "memory");
625
626                         /* Get pool pointer alone */
627                         mbuf0 = (uint64_t *)*mbuf0;
628                         mbuf1 = (uint64_t *)*mbuf1;
629                         mbuf2 = (uint64_t *)*mbuf2;
630                         mbuf3 = (uint64_t *)*mbuf3;
631                 } else {
632                         /* Get pool pointer alone */
633                         mbuf0 = (uint64_t *)*mbuf0;
634                         mbuf1 = (uint64_t *)*mbuf1;
635                         mbuf2 = (uint64_t *)*mbuf2;
636                         mbuf3 = (uint64_t *)*mbuf3;
637                 }
638
639                 const uint8x16_t shuf_mask2 = {
640                         0x4, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
641                         0xc, 0xd, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
642                 };
643                 xtmp128 = vzip2q_u64(len_olflags0, len_olflags1);
644                 ytmp128 = vzip2q_u64(len_olflags2, len_olflags3);
645
646                 /* Clear dataoff_iovaX.D[1] bits other than dataoff(15:0) */
647                 const uint64x2_t and_mask0 = {
648                         0xFFFFFFFFFFFFFFFF,
649                         0x000000000000FFFF,
650                 };
651
652                 dataoff_iova0 = vandq_u64(dataoff_iova0, and_mask0);
653                 dataoff_iova1 = vandq_u64(dataoff_iova1, and_mask0);
654                 dataoff_iova2 = vandq_u64(dataoff_iova2, and_mask0);
655                 dataoff_iova3 = vandq_u64(dataoff_iova3, and_mask0);
656
657                 /*
658                  * Pick only 16 bits of pktlen present at bits 63:32
659                  * and place them at bits 15:0.
660                  */
661                 xtmp128 = vqtbl1q_u8(xtmp128, shuf_mask2);
662                 ytmp128 = vqtbl1q_u8(ytmp128, shuf_mask2);
663
664                 /* Add pairwise to get dataoff + iova in sgdesc_w1 */
665                 sgdesc01_w1 = vpaddq_u64(dataoff_iova0, dataoff_iova1);
666                 sgdesc23_w1 = vpaddq_u64(dataoff_iova2, dataoff_iova3);
667
668                 /* OR both sgdesc_w0 and senddesc_w0 with the 16 bits of
669                  * pktlen at bit position 15:0.
670                  */
671                 sgdesc01_w0 = vorrq_u64(sgdesc01_w0, xtmp128);
672                 sgdesc23_w0 = vorrq_u64(sgdesc23_w0, ytmp128);
673                 senddesc01_w0 = vorrq_u64(senddesc01_w0, xtmp128);
674                 senddesc23_w0 = vorrq_u64(senddesc23_w0, ytmp128);
675
676                 /* Move mbuf to point to pool_id. */
677                 mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
678                                      offsetof(struct rte_mempool, pool_id));
679                 mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
680                                      offsetof(struct rte_mempool, pool_id));
681                 mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
682                                      offsetof(struct rte_mempool, pool_id));
683                 mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
684                                      offsetof(struct rte_mempool, pool_id));
685
686                 if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
687                     !(flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
688                         /*
689                          * Lookup table to translate ol_flags to
690                          * il3/il4 types. But we still use ol3/ol4 types in
691                          * senddesc_w1 as only one header processing is enabled.
692                          */
693                         const uint8x16_t tbl = {
694                                 /* [0-15] = il4type:il3type */
695                                 0x04, /* none (IPv6 assumed) */
696                                 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
697                                 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
698                                 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
699                                 0x03, /* PKT_TX_IP_CKSUM */
700                                 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
701                                 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
702                                 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
703                                 0x02, /* PKT_TX_IPV4  */
704                                 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
705                                 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
706                                 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
707                                 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
708                                 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
709                                        * PKT_TX_TCP_CKSUM
710                                        */
711                                 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
712                                        * PKT_TX_SCTP_CKSUM
713                                        */
714                                 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
715                                        * PKT_TX_UDP_CKSUM
716                                        */
717                         };
718
719                         /* Extract olflags to translate to iltypes */
720                         xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
721                         ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
722
723                         /*
724                          * E(47):L3_LEN(9):L2_LEN(7+z)
725                          * E(47):L3_LEN(9):L2_LEN(7+z)
726                          */
727                         senddesc01_w1 = vshlq_n_u64(senddesc01_w1, 1);
728                         senddesc23_w1 = vshlq_n_u64(senddesc23_w1, 1);
729
730                         /* Move OLFLAGS bits 55:52 to 51:48
731                          * with zeros prepended on the byte; the rest
732                          * is don't care
733                          */
734                         xtmp128 = vshrq_n_u8(xtmp128, 4);
735                         ytmp128 = vshrq_n_u8(ytmp128, 4);
736                         /*
737                          * E(48):L3_LEN(8):L2_LEN(z+7)
738                          * E(48):L3_LEN(8):L2_LEN(z+7)
739                          */
740                         const int8x16_t tshft3 = {
741                                 -1, 0, 8, 8, 8, 8, 8, 8,
742                                 -1, 0, 8, 8, 8, 8, 8, 8,
743                         };
744
745                         senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
746                         senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);
747
748                         /* Do the lookup */
749                         ltypes01 = vqtbl1q_u8(tbl, xtmp128);
750                         ltypes23 = vqtbl1q_u8(tbl, ytmp128);
751
752                         /* Pick only the relevant fields, i.e. bits 48:55 of iltype
753                          * and place it in ol3/ol4type of senddesc_w1
754                          */
755                         const uint8x16_t shuf_mask0 = {
756                                 0xFF, 0xFF, 0xFF, 0xFF, 0x6, 0xFF, 0xFF, 0xFF,
757                                 0xFF, 0xFF, 0xFF, 0xFF, 0xE, 0xFF, 0xFF, 0xFF,
758                         };
759
760                         ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
761                         ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);
762
763                         /* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
764                          * a [E(32):E(16):OL3(8):OL2(8)]
765                          * a = a + (a << 8)
766                          * a [E(32):E(16):(OL3+OL2):OL2]
767                          * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
768                          */
769                         senddesc01_w1 = vaddq_u8(senddesc01_w1,
770                                                  vshlq_n_u16(senddesc01_w1, 8));
771                         senddesc23_w1 = vaddq_u8(senddesc23_w1,
772                                                  vshlq_n_u16(senddesc23_w1, 8));
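                        /* Worked example (illustrative): with OL2 = 14 and
                         * OL3 = 20 the lane holds 0x140E; the byte-wise add of
                         * (lane << 8) yields 0x220E, i.e. OL3PTR = 14 and
                         * OL4PTR = 34.
                         */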
773
774                         /* Move ltypes to senddesc*_w1 */
775                         senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
776                         senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
777                 } else if (!(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
778                            (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
779                         /*
780                          * Lookup table to translate ol_flags to
781                          * ol3/ol4 types.
782                          */
783
784                         const uint8x16_t tbl = {
785                                 /* [0-15] = ol4type:ol3type */
786                                 0x00, /* none */
787                                 0x03, /* OUTER_IP_CKSUM */
788                                 0x02, /* OUTER_IPV4 */
789                                 0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
790                                 0x04, /* OUTER_IPV6 */
791                                 0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
792                                 0x00, /* OUTER_IPV6 | OUTER_IPV4 */
793                                 0x00, /* OUTER_IPV6 | OUTER_IPV4 |
794                                        * OUTER_IP_CKSUM
795                                        */
796                                 0x00, /* OUTER_UDP_CKSUM */
797                                 0x33, /* OUTER_UDP_CKSUM | OUTER_IP_CKSUM */
798                                 0x32, /* OUTER_UDP_CKSUM | OUTER_IPV4 */
799                                 0x33, /* OUTER_UDP_CKSUM | OUTER_IPV4 |
800                                        * OUTER_IP_CKSUM
801                                        */
802                                 0x34, /* OUTER_UDP_CKSUM | OUTER_IPV6 */
803                                 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
804                                        * OUTER_IP_CKSUM
805                                        */
806                                 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
807                                        * OUTER_IPV4
808                                        */
809                                 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
810                                        * OUTER_IPV4 | OUTER_IP_CKSUM
811                                        */
812                         };
813
814                         /* Extract olflags to translate to iltypes */
815                         xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
816                         ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
817
818                         /*
819                          * E(47):OL3_LEN(9):OL2_LEN(7+z)
820                          * E(47):OL3_LEN(9):OL2_LEN(7+z)
821                          */
822                         const uint8x16_t shuf_mask5 = {
823                                 0x6, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
824                                 0xE, 0xD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
825                         };
826                         senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
827                         senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);
828
829                         /* Extract outer ol flags only */
830                         const uint64x2_t o_cksum_mask = {
831                                 0x1C00020000000000,
832                                 0x1C00020000000000,
833                         };
834
835                         xtmp128 = vandq_u64(xtmp128, o_cksum_mask);
836                         ytmp128 = vandq_u64(ytmp128, o_cksum_mask);
837
838                         /* Extract OUTER_UDP_CKSUM bit 41 and
839                          * move it to bit 61
840                          */
841
842                         xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
843                         ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);
844
845                         /* Shift oltype by 2 to start nibble from BIT(56)
846                          * instead of BIT(58)
847                          */
848                         xtmp128 = vshrq_n_u8(xtmp128, 2);
849                         ytmp128 = vshrq_n_u8(ytmp128, 2);
850                         /*
851                          * E(48):L3_LEN(8):L2_LEN(z+7)
852                          * E(48):L3_LEN(8):L2_LEN(z+7)
853                          */
854                         const int8x16_t tshft3 = {
855                                 -1, 0, 8, 8, 8, 8, 8, 8,
856                                 -1, 0, 8, 8, 8, 8, 8, 8,
857                         };
858
859                         senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
860                         senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);
861
862                         /* Do the lookup */
863                         ltypes01 = vqtbl1q_u8(tbl, xtmp128);
864                         ltypes23 = vqtbl1q_u8(tbl, ytmp128);
865
866                         /* Pick only the relevant fields, i.e. bits 56:63 of oltype
867                          * and place it in ol3/ol4type of senddesc_w1
868                          */
869                         const uint8x16_t shuf_mask0 = {
870                                 0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0xFF, 0xFF, 0xFF,
871                                 0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xFF, 0xFF, 0xFF,
872                         };
873
874                         ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
875                         ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);
876
877                         /* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
878                          * a [E(32):E(16):OL3(8):OL2(8)]
879                          * a = a + (a << 8)
880                          * a [E(32):E(16):(OL3+OL2):OL2]
881                          * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
882                          */
883                         senddesc01_w1 = vaddq_u8(senddesc01_w1,
884                                                  vshlq_n_u16(senddesc01_w1, 8));
885                         senddesc23_w1 = vaddq_u8(senddesc23_w1,
886                                                  vshlq_n_u16(senddesc23_w1, 8));
887
888                         /* Move ltypes to senddesc*_w1 */
889                         senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
890                         senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
891                 } else if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
892                            (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
893                         /* Lookup table to translate ol_flags to
894                          * ol4type, ol3type, il4type, il3type of senddesc_w1
895                          */
896                         const uint8x16x2_t tbl = {{
897                                 {
898                                         /* [0-15] = il4type:il3type */
899                                         0x04, /* none (IPv6) */
900                                         0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
901                                         0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
902                                         0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
903                                         0x03, /* PKT_TX_IP_CKSUM */
904                                         0x13, /* PKT_TX_IP_CKSUM |
905                                                * PKT_TX_TCP_CKSUM
906                                                */
907                                         0x23, /* PKT_TX_IP_CKSUM |
908                                                * PKT_TX_SCTP_CKSUM
909                                                */
910                                         0x33, /* PKT_TX_IP_CKSUM |
911                                                * PKT_TX_UDP_CKSUM
912                                                */
913                                         0x02, /* PKT_TX_IPV4 */
914                                         0x12, /* PKT_TX_IPV4 |
915                                                * PKT_TX_TCP_CKSUM
916                                                */
917                                         0x22, /* PKT_TX_IPV4 |
918                                                * PKT_TX_SCTP_CKSUM
919                                                */
920                                         0x32, /* PKT_TX_IPV4 |
921                                                * PKT_TX_UDP_CKSUM
922                                                */
923                                         0x03, /* PKT_TX_IPV4 |
924                                                * PKT_TX_IP_CKSUM
925                                                */
926                                         0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
927                                                * PKT_TX_TCP_CKSUM
928                                                */
929                                         0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
930                                                * PKT_TX_SCTP_CKSUM
931                                                */
932                                         0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
933                                                * PKT_TX_UDP_CKSUM
934                                                */
935                                 },
936
937                                 {
938                                         /* [16-31] = ol4type:ol3type */
939                                         0x00, /* none */
940                                         0x03, /* OUTER_IP_CKSUM */
941                                         0x02, /* OUTER_IPV4 */
942                                         0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
943                                         0x04, /* OUTER_IPV6 */
944                                         0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
945                                         0x00, /* OUTER_IPV6 | OUTER_IPV4 */
946                                         0x00, /* OUTER_IPV6 | OUTER_IPV4 |
947                                                * OUTER_IP_CKSUM
948                                                */
949                                         0x00, /* OUTER_UDP_CKSUM */
950                                         0x33, /* OUTER_UDP_CKSUM |
951                                                * OUTER_IP_CKSUM
952                                                */
953                                         0x32, /* OUTER_UDP_CKSUM |
954                                                * OUTER_IPV4
955                                                */
956                                         0x33, /* OUTER_UDP_CKSUM |
957                                                * OUTER_IPV4 | OUTER_IP_CKSUM
958                                                */
959                                         0x34, /* OUTER_UDP_CKSUM |
960                                                * OUTER_IPV6
961                                                */
962                                         0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
963                                                * OUTER_IP_CKSUM
964                                                */
965                                         0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
966                                                * OUTER_IPV4
967                                                */
968                                         0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
969                                                * OUTER_IPV4 | OUTER_IP_CKSUM
970                                                */
971                                 },
972                         }};
973
974                         /* Extract olflags to translate to oltype & iltype */
975                         xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
976                         ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
977
978                         /*
979                          * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
980                          * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
981                          */
982                         const uint32x4_t tshft_4 = {
983                                 1,
984                                 0,
985                                 1,
986                                 0,
987                         };
988                         senddesc01_w1 = vshlq_u32(senddesc01_w1, tshft_4);
989                         senddesc23_w1 = vshlq_u32(senddesc23_w1, tshft_4);
990
991                         /*
992                          * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
993                          * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
994                          */
995                         const uint8x16_t shuf_mask5 = {
996                                 0x6, 0x5, 0x0, 0x1, 0xFF, 0xFF, 0xFF, 0xFF,
997                                 0xE, 0xD, 0x8, 0x9, 0xFF, 0xFF, 0xFF, 0xFF,
998                         };
999                         senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
1000                         senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);
1001
1002                         /* Extract outer and inner header ol_flags */
1003                         const uint64x2_t oi_cksum_mask = {
1004                                 0x1CF0020000000000,
1005                                 0x1CF0020000000000,
1006                         };
1007
1008                         xtmp128 = vandq_u64(xtmp128, oi_cksum_mask);
1009                         ytmp128 = vandq_u64(ytmp128, oi_cksum_mask);
1010
1011                         /* Extract OUTER_UDP_CKSUM bit 41 and
1012                          * move it to bit 61
1013                          */
1014
1015                         xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
1016                         ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);
1017
1018                         /* Shift right oltype by 2 and iltype by 4
1019                          * to start the oltype nibble at BIT(56)
1020                          * instead of BIT(58) and the iltype nibble at BIT(48)
1021                          * instead of BIT(52).
1022                          */
1023                         const int8x16_t tshft5 = {
1024                                 8, 8, 8, 8, 8, 8, -4, -2,
1025                                 8, 8, 8, 8, 8, 8, -4, -2,
1026                         };
1027
1028                         xtmp128 = vshlq_u8(xtmp128, tshft5);
1029                         ytmp128 = vshlq_u8(ytmp128, tshft5);
1030                         /*
1031                          * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
1032                          * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
1033                          */
1034                         const int8x16_t tshft3 = {
1035                                 -1, 0, -1, 0, 0, 0, 0, 0,
1036                                 -1, 0, -1, 0, 0, 0, 0, 0,
1037                         };
1038
1039                         senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
1040                         senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);
1041
1042                         /* Mark Bit(4) of oltype */
1043                         const uint64x2_t oi_cksum_mask2 = {
1044                                 0x1000000000000000,
1045                                 0x1000000000000000,
1046                         };
1047
1048                         xtmp128 = vorrq_u64(xtmp128, oi_cksum_mask2);
1049                         ytmp128 = vorrq_u64(ytmp128, oi_cksum_mask2);
1050
1051                         /* Do the lookup */
1052                         ltypes01 = vqtbl2q_u8(tbl, xtmp128);
1053                         ltypes23 = vqtbl2q_u8(tbl, ytmp128);
1054
1055                         /* Pick only the relevant fields, i.e. bits 48:55 of iltype and
1056                          * Bit 56:63 of oltype and place it in corresponding
1057                          * place in senddesc_w1.
1058                          */
1059                         const uint8x16_t shuf_mask0 = {
1060                                 0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0x6, 0xFF, 0xFF,
1061                                 0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xE, 0xFF, 0xFF,
1062                         };
1063
1064                         ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
1065                         ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);
1066
1067                         /* Prepare l4ptr, l3ptr, ol4ptr, ol3ptr from
1068                          * l3len, l2len, ol3len, ol2len.
1069                          * a [E(32):L3(8):L2(8):OL3(8):OL2(8)]
1070                          * a = a + (a << 8)
1071                          * a [E:(L3+L2):(L2+OL3):(OL3+OL2):OL2]
1072                          * a = a + (a << 16)
1073                          * a [E:(L3+L2+OL3+OL2):(L2+OL3+OL2):(OL3+OL2):OL2]
1074                          * => E(32):IL4PTR(8):IL3PTR(8):OL4PTR(8):OL3PTR(8)
1075                          */
1076                         senddesc01_w1 = vaddq_u8(senddesc01_w1,
1077                                                  vshlq_n_u32(senddesc01_w1, 8));
1078                         senddesc23_w1 = vaddq_u8(senddesc23_w1,
1079                                                  vshlq_n_u32(senddesc23_w1, 8));
1080
1081                         /* Continue preparing l4ptr, l3ptr, ol4ptr, ol3ptr */
1082                         senddesc01_w1 = vaddq_u8(
1083                                 senddesc01_w1, vshlq_n_u32(senddesc01_w1, 16));
1084                         senddesc23_w1 = vaddq_u8(
1085                                 senddesc23_w1, vshlq_n_u32(senddesc23_w1, 16));
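                        /* Worked example (illustrative): with OL2 = L2 = 14
                         * and OL3 = L3 = 20 the lane starts as 0x140E140E and
                         * ends as 0x4430220E after the two byte-wise adds,
                         * i.e. OL3PTR = 14, OL4PTR = 34, IL3PTR = 48,
                         * IL4PTR = 68.
                         */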
1086
1087                         /* Move ltypes to senddesc*_w1 */
1088                         senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
1089                         senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
1090                 }
1091
1092                 xmask01 = vdupq_n_u64(0);
1093                 xmask23 = xmask01;
1094                 asm volatile("LD1 {%[a].H}[0],[%[in]]\n\t"
1095                              : [a] "+w"(xmask01)
1096                              : [in] "r"(mbuf0)
1097                              : "memory");
1098
1099                 asm volatile("LD1 {%[a].H}[4],[%[in]]\n\t"
1100                              : [a] "+w"(xmask01)
1101                              : [in] "r"(mbuf1)
1102                              : "memory");
1103
1104                 asm volatile("LD1 {%[b].H}[0],[%[in]]\n\t"
1105                              : [b] "+w"(xmask23)
1106                              : [in] "r"(mbuf2)
1107                              : "memory");
1108
1109                 asm volatile("LD1 {%[b].H}[4],[%[in]]\n\t"
1110                              : [b] "+w"(xmask23)
1111                              : [in] "r"(mbuf3)
1112                              : "memory");
1113                 xmask01 = vshlq_n_u64(xmask01, 20);
1114                 xmask23 = vshlq_n_u64(xmask23, 20);
1115
1116                 senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
1117                 senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
1118
1119                 if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
1120                         /* Set don't free bit if reference count > 1 */
1121                         xmask01 = vdupq_n_u64(0);
1122                         xmask23 = xmask01;
1123
1124                         /* Reload the mbuf pointers */
1125                         mbuf0 = (uint64_t *)tx_pkts[0];
1126                         mbuf1 = (uint64_t *)tx_pkts[1];
1127                         mbuf2 = (uint64_t *)tx_pkts[2];
1128                         mbuf3 = (uint64_t *)tx_pkts[3];
1129
1130                         if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
1131                                 xmask01 = vsetq_lane_u64(0x80000, xmask01, 0);
1132                         else
1133                                 __mempool_check_cookies(
1134                                         ((struct rte_mbuf *)mbuf0)->pool,
1135                                         (void **)&mbuf0, 1, 0);
1136
1137                         if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
1138                                 xmask01 = vsetq_lane_u64(0x80000, xmask01, 1);
1139                         else
1140                                 __mempool_check_cookies(
1141                                         ((struct rte_mbuf *)mbuf1)->pool,
1142                                         (void **)&mbuf1, 1, 0);
1143
1144                         if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
1145                                 xmask23 = vsetq_lane_u64(0x80000, xmask23, 0);
1146                         else
1147                                 __mempool_check_cookies(
1148                                         ((struct rte_mbuf *)mbuf2)->pool,
1149                                         (void **)&mbuf2, 1, 0);
1150
1151                         if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
1152                                 xmask23 = vsetq_lane_u64(0x80000, xmask23, 1);
1153                         else
1154                                 __mempool_check_cookies(
1155                                         ((struct rte_mbuf *)mbuf3)->pool,
1156                                         (void **)&mbuf3, 1, 0);
1157                         senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
1158                         senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
1159                         /* Ensure mbuf fields updated in cnxk_nix_prefree_seg
1160                          * are written out before the LMTST.
1161                          */
1162                         rte_io_wmb();
1163                 } else {
1164                         /* Point back at the mbufs */
1165                         mbuf0 = (uint64_t *)tx_pkts[0];
1166                         mbuf1 = (uint64_t *)tx_pkts[1];
1167                         mbuf2 = (uint64_t *)tx_pkts[2];
1168                         mbuf3 = (uint64_t *)tx_pkts[3];
1169
1170                         /* Mark mempool object as "put" since
1171                          * it is freed by NIX
1172                          */
1173                         __mempool_check_cookies(
1174                                 ((struct rte_mbuf *)mbuf0)->pool,
1175                                 (void **)&mbuf0, 1, 0);
1176
1177                         __mempool_check_cookies(
1178                                 ((struct rte_mbuf *)mbuf1)->pool,
1179                                 (void **)&mbuf1, 1, 0);
1180
1181                         __mempool_check_cookies(
1182                                 ((struct rte_mbuf *)mbuf2)->pool,
1183                                 (void **)&mbuf2, 1, 0);
1184
1185                         __mempool_check_cookies(
1186                                 ((struct rte_mbuf *)mbuf3)->pool,
1187                                 (void **)&mbuf3, 1, 0);
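                             /* __mempool_check_cookies() writes to the objects
                              * only when mempool debug is enabled, so the
                              * barrier is needed only in that case.
                              */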
1188 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1189                         rte_io_wmb();
1190 #endif
1191                 }
1192
1193                 /* Create 4W cmd for 4 mbufs (sendhdr, sgdesc) */
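                     /* vzip1q/vzip2q pair each mbuf's w0 with its w1, one 16 B
                      * sub-descriptor per packet: cmd0[i] is the send header
                      * and cmd1[i] the SG descriptor of packet i.
                      */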
1194                 cmd0[0] = vzip1q_u64(senddesc01_w0, senddesc01_w1);
1195                 cmd0[1] = vzip2q_u64(senddesc01_w0, senddesc01_w1);
1196                 cmd0[2] = vzip1q_u64(senddesc23_w0, senddesc23_w1);
1197                 cmd0[3] = vzip2q_u64(senddesc23_w0, senddesc23_w1);
1198
1199                 cmd1[0] = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
1200                 cmd1[1] = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
1201                 cmd1[2] = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
1202                 cmd1[3] = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
1203
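                     /* Store the four 4-word commands to the LMT line and
                      * submit with LDEOR; a return of 0 means the LMTST was
                      * not committed, so retry.
                      */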
1204                 do {
1205                         vst1q_u64(lmt_addr, cmd0[0]);
1206                         vst1q_u64(lmt_addr + 2, cmd1[0]);
1207                         vst1q_u64(lmt_addr + 4, cmd0[1]);
1208                         vst1q_u64(lmt_addr + 6, cmd1[1]);
1209                         vst1q_u64(lmt_addr + 8, cmd0[2]);
1210                         vst1q_u64(lmt_addr + 10, cmd1[2]);
1211                         vst1q_u64(lmt_addr + 12, cmd0[3]);
1212                         vst1q_u64(lmt_addr + 14, cmd1[3]);
1213                         lmt_status = roc_lmt_submit_ldeor(io_addr);
1214                 } while (lmt_status == 0);
1215                 tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
1216         }
1217
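             /* Send any tail packets (< NIX_DESCS_PER_LOOP) via the scalar
              * path.
              */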
1218         if (unlikely(pkts_left))
1219                 pkts += cn9k_nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd,
1220                                            flags);
1221
1222         return pkts;
1223 }
1224
1225 #else
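     /* Vector Tx is not compiled in for this build; provide a stub that
      * transmits nothing.
      */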
1226 static __rte_always_inline uint16_t
1227 cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
1228                           uint16_t pkts, uint64_t *cmd, const uint16_t flags)
1229 {
1230         RTE_SET_USED(tx_queue);
1231         RTE_SET_USED(tx_pkts);
1232         RTE_SET_USED(pkts);
1233         RTE_SET_USED(cmd);
1234         RTE_SET_USED(flags);
1235         return 0;
1236 }
1237 #endif
1238
1239 #define L3L4CSUM_F   NIX_TX_OFFLOAD_L3_L4_CSUM_F
1240 #define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
1241 #define VLAN_F       NIX_TX_OFFLOAD_VLAN_QINQ_F
1242 #define NOFF_F       NIX_TX_OFFLOAD_MBUF_NOFF_F
1243 #define TSO_F        NIX_TX_OFFLOAD_TSO_F
1244
1245 /* [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
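     /* T(name, TSO, NOFF, VLAN, OL3OL4CSUM, L3L4CSUM, sz, flags): the single
      * bits select the offloads, sz is the Tx command size in 64-bit words
      * (6 when the send extension header is needed for VLAN/TSO, else 4).
      */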
1246 #define NIX_TX_FASTPATH_MODES                                           \
1247 T(no_offload,                           0, 0, 0, 0, 0,  4,              \
1248                 NIX_TX_OFFLOAD_NONE)                                    \
1249 T(l3l4csum,                             0, 0, 0, 0, 1,  4,              \
1250                 L3L4CSUM_F)                                             \
1251 T(ol3ol4csum,                           0, 0, 0, 1, 0,  4,              \
1252                 OL3OL4CSUM_F)                                           \
1253 T(ol3ol4csum_l3l4csum,                  0, 0, 0, 1, 1,  4,              \
1254                 OL3OL4CSUM_F | L3L4CSUM_F)                              \
1255 T(vlan,                                 0, 0, 1, 0, 0,  6,              \
1256                 VLAN_F)                                                 \
1257 T(vlan_l3l4csum,                        0, 0, 1, 0, 1,  6,              \
1258                 VLAN_F | L3L4CSUM_F)                                    \
1259 T(vlan_ol3ol4csum,                      0, 0, 1, 1, 0,  6,              \
1260                 VLAN_F | OL3OL4CSUM_F)                                  \
1261 T(vlan_ol3ol4csum_l3l4csum,             0, 0, 1, 1, 1,  6,              \
1262                 VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)                     \
1263 T(noff,                                 0, 1, 0, 0, 0,  4,              \
1264                 NOFF_F)                                                 \
1265 T(noff_l3l4csum,                        0, 1, 0, 0, 1,  4,              \
1266                 NOFF_F | L3L4CSUM_F)                                    \
1267 T(noff_ol3ol4csum,                      0, 1, 0, 1, 0,  4,              \
1268                 NOFF_F | OL3OL4CSUM_F)                                  \
1269 T(noff_ol3ol4csum_l3l4csum,             0, 1, 0, 1, 1,  4,              \
1270                 NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)                     \
1271 T(noff_vlan,                            0, 1, 1, 0, 0,  6,              \
1272                 NOFF_F | VLAN_F)                                        \
1273 T(noff_vlan_l3l4csum,                   0, 1, 1, 0, 1,  6,              \
1274                 NOFF_F | VLAN_F | L3L4CSUM_F)                           \
1275 T(noff_vlan_ol3ol4csum,                 0, 1, 1, 1, 0,  6,              \
1276                 NOFF_F | VLAN_F | OL3OL4CSUM_F)                         \
1277 T(noff_vlan_ol3ol4csum_l3l4csum,        0, 1, 1, 1, 1,  6,              \
1278                 NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)            \
1279 T(tso,                                  1, 0, 0, 0, 0,  6,              \
1280                 TSO_F)                                                  \
1281 T(tso_l3l4csum,                         1, 0, 0, 0, 1,  6,              \
1282                 TSO_F | L3L4CSUM_F)                                     \
1283 T(tso_ol3ol4csum,                       1, 0, 0, 1, 0,  6,              \
1284                 TSO_F | OL3OL4CSUM_F)                                   \
1285 T(tso_ol3ol4csum_l3l4csum,              1, 0, 0, 1, 1,  6,              \
1286                 TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)                      \
1287 T(tso_vlan,                             1, 0, 1, 0, 0,  6,              \
1288                 TSO_F | VLAN_F)                                         \
1289 T(tso_vlan_l3l4csum,                    1, 0, 1, 0, 1,  6,              \
1290                 TSO_F | VLAN_F | L3L4CSUM_F)                            \
1291 T(tso_vlan_ol3ol4csum,                  1, 0, 1, 1, 0,  6,              \
1292                 TSO_F | VLAN_F | OL3OL4CSUM_F)                          \
1293 T(tso_vlan_ol3ol4csum_l3l4csum,         1, 0, 1, 1, 1,  6,              \
1294                 TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)             \
1295 T(tso_noff,                             1, 1, 0, 0, 0,  6,              \
1296                 TSO_F | NOFF_F)                                         \
1297 T(tso_noff_l3l4csum,                    1, 1, 0, 0, 1,  6,              \
1298                 TSO_F | NOFF_F | L3L4CSUM_F)                            \
1299 T(tso_noff_ol3ol4csum,                  1, 1, 0, 1, 0,  6,              \
1300                 TSO_F | NOFF_F | OL3OL4CSUM_F)                          \
1301 T(tso_noff_ol3ol4csum_l3l4csum,         1, 1, 0, 1, 1,  6,              \
1302                 TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)             \
1303 T(tso_noff_vlan,                        1, 1, 1, 0, 0,  6,              \
1304                 TSO_F | NOFF_F | VLAN_F)                                \
1305 T(tso_noff_vlan_l3l4csum,               1, 1, 1, 0, 1,  6,              \
1306                 TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)                   \
1307 T(tso_noff_vlan_ol3ol4csum,             1, 1, 1, 1, 0,  6,              \
1308                 TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)                 \
1309 T(tso_noff_vlan_ol3ol4csum_l3l4csum,    1, 1, 1, 1, 1,  6,              \
1310                 TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
1311
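     /* Declare the scalar, multi-segment and vector Tx burst variants for
      * every offload combination above.
      */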
1312 #define T(name, f4, f3, f2, f1, f0, sz, flags)                                 \
1313         uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name(           \
1314                 void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);     \
1315                                                                                \
1316         uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_mseg_##name(      \
1317                 void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);     \
1318                                                                                \
1319         uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_vec_##name(       \
1320                 void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
1321
1322 NIX_TX_FASTPATH_MODES
1323 #undef T
1324
1325 #endif /* __CN9K_TX_H__ */