/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_TX_H__
#define __CN9K_TX_H__
#define NIX_TX_OFFLOAD_NONE	      (0)
#define NIX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(0)
#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
#define NIX_TX_OFFLOAD_VLAN_QINQ_F    BIT(2)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)
#define NIX_TX_OFFLOAD_TSO_F	      BIT(4)
/* Flags to control xmit_prepare function.
 * Defined from the MSB end so that they are not
 * confused with the offload flags above, which are
 * used to pick the transmit function.
 */
#define NIX_TX_MULTI_SEG_F BIT(15)
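
/* Illustrative example (not part of the driver): a fastpath variant
 * compiled for inner + outer checksum offload would use
 *
 *   const uint16_t flags = NIX_TX_OFFLOAD_L3_L4_CSUM_F |
 *                          NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 *
 * Since 'flags' is a compile-time constant in every variant below, the
 * compiler prunes all untaken flag checks from the generated code.
 */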
#define NIX_TX_NEED_SEND_HDR_W1                                                \
	(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |         \
	 NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)

#define NIX_TX_NEED_EXT_HDR                                                    \
	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
#define NIX_XMIT_FC_OR_RETURN(txq, pkts)                                       \
	do {                                                                   \
		/* Cached value is low, update fc_cache_pkts */                \
		if (unlikely((txq)->fc_cache_pkts < (pkts))) {                 \
			/* Multiply with sqes_per_sqb to express in pkts */    \
			(txq)->fc_cache_pkts =                                 \
				((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem)      \
				<< (txq)->sqes_per_sqb_log2;                   \
			/* Check again whether there is room */                \
			if (unlikely((txq)->fc_cache_pkts < (pkts)))           \
				return 0;                                      \
		}                                                              \
	} while (0)
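
/* Worked example (illustrative values): with nb_sqb_bufs_adj = 512,
 * *fc_mem = 256 and sqes_per_sqb_log2 = 3, the refreshed cache is
 * (512 - 256) << 3 = 2048 packets worth of SQE room. Only if that is
 * still smaller than the requested burst does the burst return 0.
 */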
/* Function to determine the number of Tx subdescriptors required when the
 * extended subdescriptor is enabled.
 */
static __rte_always_inline int
cn9k_nix_tx_ext_subs(const uint16_t flags)
{
	return (flags &
		(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ? 1 : 0;
}
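
/* Illustrative note: the return value feeds roc_lmt_mov(), which is
 * assumed here to size the LMT copy accordingly, e.g.:
 *
 *   cn9k_nix_tx_ext_subs(NIX_TX_OFFLOAD_TSO_F)       -> 1 (ext needed)
 *   cn9k_nix_tx_ext_subs(NIX_TX_OFFLOAD_VLAN_QINQ_F) -> 1
 *   cn9k_nix_tx_ext_subs(NIX_TX_OFFLOAD_NONE)        -> 0
 */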
static __rte_always_inline void
cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
{
	uint64_t mask, ol_flags = m->ol_flags;

	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
		uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
		uint16_t *iplen, *oiplen, *oudplen;
		uint16_t lso_sb, paylen;

		mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
		lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
			 m->l2_len + m->l3_len + m->l4_len;

		/* Reduce payload len from base headers */
		paylen = m->pkt_len - lso_sb;

		/* Get iplen position assuming no tunnel hdr */
		iplen = (uint16_t *)(mdata + m->l2_len +
				     (2 << !!(ol_flags & PKT_TX_IPV6)));
		/* Handle tunnel tso */
		if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
		    (ol_flags & PKT_TX_TUNNEL_MASK)) {
			const uint8_t is_udp_tun =
				(CNXK_NIX_UDP_TUN_BITMASK >>
				 ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
				0x1;

			oiplen = (uint16_t *)(mdata + m->outer_l2_len +
					      (2 << !!(ol_flags &
						       PKT_TX_OUTER_IPV6)));
			*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
						   paylen);

			/* Update outer UDP len for UDP tunneled packet */
			if (is_udp_tun) {
				oudplen = (uint16_t *)(mdata +
						       m->outer_l2_len +
						       m->outer_l3_len + 4);
				*oudplen = rte_cpu_to_be_16(
					rte_be_to_cpu_16(*oudplen) - paylen);
			}

			/* Update iplen position to inner ip hdr */
			iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
					     m->l4_len +
					     (2 << !!(ol_flags &
						      PKT_TX_IPV6)));
		}

		*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
	}
}
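
/* Worked example (illustrative): a 1514B TCP/IPv4 packet with
 * l2_len = 14, l3_len = 20, l4_len = 20 and no tunnel gives
 * lso_sb = 54 and paylen = 1514 - 54 = 1460, so the IP total-length
 * field is rewritten to describe a single MSS-sized segment before
 * the hardware replicates the headers.
 */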
static __rte_always_inline void
cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
		      const uint64_t lso_tun_fmt)
{
	struct nix_send_ext_s *send_hdr_ext;
	struct nix_send_hdr_s *send_hdr;
	uint64_t ol_flags = 0, mask;
	union nix_send_hdr_w1_u w1;
	union nix_send_sg_s *sg;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	if (flags & NIX_TX_NEED_EXT_HDR) {
		send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
		sg = (union nix_send_sg_s *)(cmd + 4);
		/* Clear previous markings */
		send_hdr_ext->w0.lso = 0;
		send_hdr_ext->w1.u = 0;
	} else {
		sg = (union nix_send_sg_s *)(cmd + 2);
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1) {
		ol_flags = m->ol_flags;
		w1.u = 0;
	}

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		send_hdr->w0.total = m->data_len;
		send_hdr->w0.aura =
			roc_npa_aura_handle_to_aura(m->pool->pool_id);
	}

	/*
	 * L3type:  2 => IPV4
	 *          3 => IPV4 with csum
	 *          4 => IPV6
	 * L3type and L3ptr need to be set for either
	 * L3 csum or L4 csum or LSO
	 */

	if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t ol3type =
			((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L3 */
		w1.ol3type = ol3type;
		mask = 0xffffull << ((!!ol3type) << 4);
		w1.ol3ptr = ~mask & m->outer_l2_len;
		w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

		/* Inner L3 */
		w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			     ((!!(ol_flags & PKT_TX_IPV6)) << 2);
		w1.il3ptr = w1.ol4ptr + m->l2_len;
		w1.il4ptr = w1.il3ptr + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;

		/* In case of no tunnel header, shift the IL3/IL4 fields
		 * down into the OL3/OL4 slots so that the lone header's
		 * checksum is described by OL3/OL4.
		 */
		mask = !ol3type;
		w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |
		       ((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
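
		/* Illustrative effect of the shift above: when there is no
		 * outer header (mask = 1), the type half (upper 32 bits)
		 * moves right by 8 and the pointer half (lower 32 bits) by
		 * 16, relocating the IL3/IL4 type and pointer values into
		 * the OL3/OL4 fields; with a tunnel header (mask = 0) both
		 * shifts are zero and w1 is unchanged.
		 */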
	} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t outer_l2_len = m->outer_l2_len;

		/* Outer L3 */
		w1.ol3ptr = outer_l2_len;
		w1.ol4ptr = outer_l2_len + m->outer_l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			     ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			     !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);
	} else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {
		const uint8_t l2_len = m->l2_len;

		/* Always use OLXPTR and OLXTYPE when only
		 * one header is present
		 */

		/* Inner L3 */
		w1.ol3ptr = l2_len;
		w1.ol4ptr = l2_len + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			     ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
			     !!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
	}

	if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
		/* HW will update ptr after vlan0 update */
		send_hdr_ext->w1.vlan1_ins_ptr = 12;
		send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;

		send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
		/* 2B before end of l2 header */
		send_hdr_ext->w1.vlan0_ins_ptr = 12;
		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
	}
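
	/* Illustrative layout note: both insert points are byte 12, right
	 * after the source MAC, so a QinQ frame ends up as
	 * DA|SA|vlan0(outer)|vlan1(inner)|ethtype; hardware adjusts the
	 * vlan1 pointer once vlan0 has been inserted.
	 */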
	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
		uint16_t lso_sb;
		uint64_t mask;

		mask = -(!w1.il3type);
		lso_sb = (mask & w1.ol4ptr) + (~mask & w1.il4ptr) + m->l4_len;

		send_hdr_ext->w0.lso_sb = lso_sb;
		send_hdr_ext->w0.lso = 1;
		send_hdr_ext->w0.lso_mps = m->tso_segsz;
		send_hdr_ext->w0.lso_format =
			NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
		w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;

		/* Handle tunnel tso */
		if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
		    (ol_flags & PKT_TX_TUNNEL_MASK)) {
			const uint8_t is_udp_tun =
				(CNXK_NIX_UDP_TUN_BITMASK >>
				 ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
				0x1;
			uint8_t shift = is_udp_tun ? 32 : 0;

			shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
			shift += (!!(ol_flags & PKT_TX_IPV6) << 3);

			w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
			w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
			/* Update format for UDP tunneled packet */
			send_hdr_ext->w0.lso_format = (lso_tun_fmt >> shift);
		}
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1)
		send_hdr->w1.u = w1.u;

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		sg->seg1_size = m->data_len;
		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);

		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			/* DF bit = 1 if refcount of current mbuf or parent
			 * mbuf is greater than 1
			 * DF bit = 0 otherwise
			 */
			send_hdr->w0.df = cnxk_nix_prefree_seg(m);
			/* Ensure mbuf fields which got updated in
			 * cnxk_nix_prefree_seg are written before LMTST.
			 */
			rte_io_wmb();
		}
		/* Mark mempool object as "put" since it is freed by NIX */
		if (!send_hdr->w0.df)
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
	}
}
static __rte_always_inline void
cn9k_nix_xmit_one(uint64_t *cmd, void *lmt_addr, const rte_iova_t io_addr,
		  const uint32_t flags)
{
	uint64_t lmt_status;

	do {
		roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}
static __rte_always_inline void
cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags)
{
	roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
}

static __rte_always_inline uint64_t
cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr)
{
	return roc_lmt_submit_ldeor(io_addr);
}

static __rte_always_inline uint64_t
cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)
{
	return roc_lmt_submit_ldeorl(io_addr);
}
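
/* Illustrative usage of the split prepare/submit helpers (sketch only,
 * mirroring the retry loop in cn9k_nix_xmit_one() above):
 *
 *   do {
 *           cn9k_nix_xmit_prep_lmt(cmd, lmt_addr, flags);
 *   } while (cn9k_nix_xmit_submit_lmt(io_addr) == 0);
 *
 * A zero LDEOR result means the LMTST must be replayed; the _release
 * variant uses LDEORL to add release ordering on the submission.
 */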
static __rte_always_inline uint16_t
cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
	struct nix_send_hdr_s *send_hdr;
	union nix_send_sg_s *sg;
	struct rte_mbuf *m_next;
	uint64_t *slist, sg_u;
	uint64_t nb_segs;
	uint64_t segdw;
	uint8_t off, i;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	send_hdr->w0.total = m->pkt_len;
	send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);

	if (flags & NIX_TX_NEED_EXT_HDR)
		off = 2;
	else
		off = 0;

	sg = (union nix_send_sg_s *)&cmd[2 + off];
	/* Clear sg->u header before use */
	sg->u &= 0xFC00000000000000;
	sg_u = sg->u;
	slist = &cmd[3 + off];

	i = 0;
	nb_segs = m->nb_segs;

	/* Fill mbuf segments */
	do {
		m_next = m->next;
		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
		*slist = rte_mbuf_data_iova(m);
		/* Set invert df if buffer is not to be freed by H/W */
		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			sg_u |= ((uint64_t)cnxk_nix_prefree_seg(m) << (i + 55));
			/* Commit changes to mbuf */
			rte_io_wmb();
		}
		/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		if (!(sg_u & (1ULL << (i + 55))))
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
		rte_io_wmb();
#endif
		slist++;
		i++;
		nb_segs--;
		if (i > 2 && nb_segs) {
			i = 0;
			/* Next SG subdesc */
			*(uint64_t *)slist = sg_u & 0xFC00000000000000;
			sg->u = sg_u;
			sg->segs = 3;
			sg = (union nix_send_sg_s *)slist;
			sg_u = sg->u;
			slist++;
		}
		m = m_next;
	} while (nb_segs);

	sg->u = sg_u;
	sg->segs = i;
	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
	/* Roundup extra dwords to multiple of 2 */
	segdw = (segdw >> 1) + (segdw & 0x1);
	/* Default dwords */
	segdw += (off >> 1) + 1;
	send_hdr->w0.sizem1 = segdw - 1;

	return segdw;
}
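
/* Worked example (illustrative): a 4-segment chain with an ext header
 * (off = 2) emits two SG subdescs (3 segs + 1 seg) spanning 6 dwords,
 * i.e. 3 dword pairs after rounding; adding (off >> 1) + 1 = 2 pairs for
 * SEND_HDR and SEND_EXT gives segdw = 5, so w0.sizem1 = 4.
 */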
static __rte_always_inline void
cn9k_nix_xmit_mseg_prep_lmt(uint64_t *cmd, void *lmt_addr, uint16_t segdw)
{
	roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
}

static __rte_always_inline void
cn9k_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr, rte_iova_t io_addr,
		       uint16_t segdw)
{
	uint64_t lmt_status;

	do {
		roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}

static __rte_always_inline void
cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
			       rte_iova_t io_addr, uint16_t segdw)
{
	uint64_t lmt_status;

	rte_io_wmb();
	do {
		roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}
static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
		   uint64_t *cmd, const uint16_t flags)
{
	struct cn9k_eth_txq *txq = tx_queue;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;
	uint64_t lso_tun_fmt;
	uint16_t i;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		lso_tun_fmt = txq->lso_tun_fmt;

		for (i = 0; i < pkts; i++)
			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Commit any changes to the packets here, as no further changes
	 * are made to them unless no-fast-free (MBUF_NOFF) handling is
	 * enabled, which issues its own barrier after prefree.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	for (i = 0; i < pkts; i++) {
		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
		cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}
static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	struct cn9k_eth_txq *txq = tx_queue;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;
	uint64_t lso_tun_fmt;
	uint16_t segdw;
	uint64_t i;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		lso_tun_fmt = txq->lso_tun_fmt;

		for (i = 0; i < pkts; i++)
			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Commit any changes to the packets here, as no further changes
	 * are made to them unless no-fast-free (MBUF_NOFF) handling is
	 * enabled, which issues its own barrier after prefree.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	for (i = 0; i < pkts; i++) {
		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
		segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
		cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}
#if defined(RTE_ARCH_ARM64)

#define NIX_DESCS_PER_LOOP 4
static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
	uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
	uint64x2_t cmd0[NIX_DESCS_PER_LOOP], cmd1[NIX_DESCS_PER_LOOP];
	uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint64x2_t senddesc01_w0, senddesc23_w0;
	uint64x2_t senddesc01_w1, senddesc23_w1;
	uint64x2_t sgdesc01_w0, sgdesc23_w0;
	uint64x2_t sgdesc01_w1, sgdesc23_w1;
	struct cn9k_eth_txq *txq = tx_queue;
	uint64_t *lmt_addr = txq->lmt_addr;
	rte_iova_t io_addr = txq->io_addr;
	uint64x2_t ltypes01, ltypes23;
	uint64x2_t xtmp128, ytmp128;
	uint64x2_t xmask01, xmask23;
	uint64_t lmt_status, i;
	uint16_t pkts_left;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	/* Commit any changes to the packets here, as no further changes
	 * are made to them unless no-fast-free (MBUF_NOFF) handling is
	 * enabled.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
	senddesc23_w0 = senddesc01_w0;
	senddesc01_w1 = vdupq_n_u64(0);
	senddesc23_w1 = senddesc01_w1;
	sgdesc01_w0 = vld1q_dup_u64(&txq->cmd[2]);
	sgdesc23_w0 = sgdesc01_w0;

	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
		senddesc01_w0 =
			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));

		senddesc23_w0 = senddesc01_w0;
		sgdesc23_w0 = sgdesc01_w0;

		/* Move mbufs to iova */
		mbuf0 = (uint64_t *)tx_pkts[0];
		mbuf1 = (uint64_t *)tx_pkts[1];
		mbuf2 = (uint64_t *)tx_pkts[2];
		mbuf3 = (uint64_t *)tx_pkts[3];

		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mbuf, buf_iova));
		/*
		 * Get mbuf's, olflags, iova, pktlen, dataoff
		 * dataoff_iovaX.D[0] = iova,
		 * dataoff_iovaX.D[1](15:0) = mbuf->dataoff
		 * len_olflagsX.D[0] = ol_flags,
		 * len_olflagsX.D[1](63:32) = mbuf->pkt_len
		 */
		dataoff_iova0 = vld1q_u64(mbuf0);
		len_olflags0 = vld1q_u64(mbuf0 + 2);
		dataoff_iova1 = vld1q_u64(mbuf1);
		len_olflags1 = vld1q_u64(mbuf1 + 2);
		dataoff_iova2 = vld1q_u64(mbuf2);
		len_olflags2 = vld1q_u64(mbuf2 + 2);
		dataoff_iova3 = vld1q_u64(mbuf3);
		len_olflags3 = vld1q_u64(mbuf3 + 2);

		/* Move mbufs to point to pool */
		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		if (flags & (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			     NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
			/* Get tx_offload for ol2, ol3, l2, l3 lengths */
			/*
			 * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
			 * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
			 */

			asm volatile("LD1 {%[a].D}[0],[%[in]]\n\t"
				     : [a] "+w"(senddesc01_w1)
				     : [in] "r"(mbuf0 + 2)
				     : "memory");

			asm volatile("LD1 {%[a].D}[1],[%[in]]\n\t"
				     : [a] "+w"(senddesc01_w1)
				     : [in] "r"(mbuf1 + 2)
				     : "memory");

			asm volatile("LD1 {%[b].D}[0],[%[in]]\n\t"
				     : [b] "+w"(senddesc23_w1)
				     : [in] "r"(mbuf2 + 2)
				     : "memory");

			asm volatile("LD1 {%[b].D}[1],[%[in]]\n\t"
				     : [b] "+w"(senddesc23_w1)
				     : [in] "r"(mbuf3 + 2)
				     : "memory");

			/* Get pool pointer alone */
			mbuf0 = (uint64_t *)*mbuf0;
			mbuf1 = (uint64_t *)*mbuf1;
			mbuf2 = (uint64_t *)*mbuf2;
			mbuf3 = (uint64_t *)*mbuf3;
		} else {
			/* Get pool pointer alone */
			mbuf0 = (uint64_t *)*mbuf0;
			mbuf1 = (uint64_t *)*mbuf1;
			mbuf2 = (uint64_t *)*mbuf2;
			mbuf3 = (uint64_t *)*mbuf3;
		}
		const uint8x16_t shuf_mask2 = {
			0x4, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xc, 0xd, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		};
		xtmp128 = vzip2q_u64(len_olflags0, len_olflags1);
		ytmp128 = vzip2q_u64(len_olflags2, len_olflags3);

		/* Clear dataoff_iovaX.D[1] bits other than dataoff(15:0) */
		const uint64x2_t and_mask0 = {
			0xFFFFFFFFFFFFFFFF,
			0x000000000000FFFF,
		};

		dataoff_iova0 = vandq_u64(dataoff_iova0, and_mask0);
		dataoff_iova1 = vandq_u64(dataoff_iova1, and_mask0);
		dataoff_iova2 = vandq_u64(dataoff_iova2, and_mask0);
		dataoff_iova3 = vandq_u64(dataoff_iova3, and_mask0);

		/*
		 * Pick only 16 bits of pktlen present at bits 63:32
		 * and place them at bits 15:0.
		 */
		xtmp128 = vqtbl1q_u8(xtmp128, shuf_mask2);
		ytmp128 = vqtbl1q_u8(ytmp128, shuf_mask2);

		/* Add pairwise to get dataoff + iova in sgdesc_w1 */
		sgdesc01_w1 = vpaddq_u64(dataoff_iova0, dataoff_iova1);
		sgdesc23_w1 = vpaddq_u64(dataoff_iova2, dataoff_iova3);

		/* Orr both sgdesc_w0 and senddesc_w0 with 16 bits of
		 * pktlen at 15:0 position.
		 */
		sgdesc01_w0 = vorrq_u64(sgdesc01_w0, xtmp128);
		sgdesc23_w0 = vorrq_u64(sgdesc23_w0, ytmp128);
		senddesc01_w0 = vorrq_u64(senddesc01_w0, xtmp128);
		senddesc23_w0 = vorrq_u64(senddesc23_w0, ytmp128);
		/* Move mbuf to point to pool_id. */
		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mempool, pool_id));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mempool, pool_id));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mempool, pool_id));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mempool, pool_id));
		if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
		    !(flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/*
			 * Lookup table to translate ol_flags to
			 * il3/il4 types. But we still use ol3/ol4 types in
			 * senddesc_w1 as only one header processing is enabled.
			 */
			const uint8x16_t tbl = {
				/* [0-15] = il4type:il3type */
				0x04, /* none (IPv6 assumed) */
				0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
				0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
				0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
				0x03, /* PKT_TX_IP_CKSUM */
				0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
				0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
				0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
				0x02, /* PKT_TX_IPV4 */
				0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
				0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
				0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
				0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
				0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_TCP_CKSUM
				       */
				0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_SCTP_CKSUM
				       */
				0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_UDP_CKSUM
				       */
			};

			/* Extract olflags to translate to iltypes */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(47):L3_LEN(9):L2_LEN(7+z)
			 * E(47):L3_LEN(9):L2_LEN(7+z)
			 */
			senddesc01_w1 = vshlq_n_u64(senddesc01_w1, 1);
			senddesc23_w1 = vshlq_n_u64(senddesc23_w1, 1);

			/* Move OLFLAGS bits 55:52 to 51:48
			 * with zeros prepended on the byte and rest
			 * of the bits retained.
			 */
			xtmp128 = vshrq_n_u8(xtmp128, 4);
			ytmp128 = vshrq_n_u8(ytmp128, 4);
			/*
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 */
			const int8x16_t tshft3 = {
				-1, 0, 8, 8, 8, 8, 8, 8,
				-1, 0, 8, 8, 8, 8, 8, 8,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Do the lookup */
			ltypes01 = vqtbl1q_u8(tbl, xtmp128);
			ltypes23 = vqtbl1q_u8(tbl, ytmp128);

			/* Pick only relevant fields i.e Bit 48:55 of iltype
			 * and place it in ol3/ol4type of senddesc_w1
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x6, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xE, 0xFF, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
			 * a [E(32):E(16):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E(32):E(16):(OL3+OL2):OL2]
			 * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u16(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u16(senddesc23_w1, 8));
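
			/* Illustrative example of the add above: with
			 * l2_len = 14 and l3_len = 20 the byte pair (20:14)
			 * becomes (34:14), i.e. OL4PTR = 34 and OL3PTR = 14,
			 * matching the scalar w1 setup for a lone header.
			 */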
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
		} else if (!(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
			   (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/*
			 * Lookup table to translate ol_flags to
			 * ol3/ol4 types.
			 */

			const uint8x16_t tbl = {
				/* [0-15] = ol4type:ol3type */
				0x00, /* none */
				0x03, /* OUTER_IP_CKSUM */
				0x02, /* OUTER_IPV4 */
				0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
				0x04, /* OUTER_IPV6 */
				0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
				0x00, /* OUTER_IPV6 | OUTER_IPV4 */
				0x00, /* OUTER_IPV6 | OUTER_IPV4 |
				       * OUTER_IP_CKSUM
				       */
				0x00, /* OUTER_UDP_CKSUM */
				0x33, /* OUTER_UDP_CKSUM | OUTER_IP_CKSUM */
				0x32, /* OUTER_UDP_CKSUM | OUTER_IPV4 */
				0x33, /* OUTER_UDP_CKSUM | OUTER_IPV4 |
				       * OUTER_IP_CKSUM
				       */
				0x34, /* OUTER_UDP_CKSUM | OUTER_IPV6 */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IP_CKSUM
				       */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IPV4
				       */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IPV4 | OUTER_IP_CKSUM
				       */
			};

			/* Extract olflags to translate to oltypes */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(47):OL3_LEN(9):OL2_LEN(7+z)
			 * E(47):OL3_LEN(9):OL2_LEN(7+z)
			 */
			const uint8x16_t shuf_mask5 = {
				0x6, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
				0xE, 0xD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			};
			senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
			senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);

			/* Extract outer ol flags only */
			const uint64x2_t o_cksum_mask = {
				0x1C00020000000000,
				0x1C00020000000000,
			};

			xtmp128 = vandq_u64(xtmp128, o_cksum_mask);
			ytmp128 = vandq_u64(ytmp128, o_cksum_mask);

			/* Extract OUTER_UDP_CKSUM bit 41 and
			 * move it to bit 61
			 */

			xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
			ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);

			/* Shift oltype by 2 to start nibble from BIT(56)
			 * instead of BIT(58)
			 */
			xtmp128 = vshrq_n_u8(xtmp128, 2);
			ytmp128 = vshrq_n_u8(ytmp128, 2);
			/*
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 */
			const int8x16_t tshft3 = {
				-1, 0, 8, 8, 8, 8, 8, 8,
				-1, 0, 8, 8, 8, 8, 8, 8,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Do the lookup */
			ltypes01 = vqtbl1q_u8(tbl, xtmp128);
			ltypes23 = vqtbl1q_u8(tbl, ytmp128);

			/* Pick only relevant fields i.e Bit 56:63 of oltype
			 * and place it in ol3/ol4type of senddesc_w1
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xFF, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
			 * a [E(32):E(16):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E(32):E(16):(OL3+OL2):OL2]
			 * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u16(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u16(senddesc23_w1, 8));

			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
		} else if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
			   (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/* Lookup table to translate ol_flags to
			 * ol4type, ol3type, il4type, il3type of senddesc_w1
			 */
			const uint8x16x2_t tbl = {{
				{
					/* [0-15] = il4type:il3type */
					0x04, /* none (IPv6) */
					0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
					0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
					0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
					0x03, /* PKT_TX_IP_CKSUM */
					0x13, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_TCP_CKSUM
					       */
					0x23, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x33, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_UDP_CKSUM
					       */
					0x02, /* PKT_TX_IPV4 */
					0x12, /* PKT_TX_IPV4 |
					       * PKT_TX_TCP_CKSUM
					       */
					0x22, /* PKT_TX_IPV4 |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x32, /* PKT_TX_IPV4 |
					       * PKT_TX_UDP_CKSUM
					       */
					0x03, /* PKT_TX_IPV4 |
					       * PKT_TX_IP_CKSUM
					       */
					0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_TCP_CKSUM
					       */
					0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_UDP_CKSUM
					       */
				},

				{
					/* [16-31] = ol4type:ol3type */
					0x00, /* none */
					0x03, /* OUTER_IP_CKSUM */
					0x02, /* OUTER_IPV4 */
					0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
					0x04, /* OUTER_IPV6 */
					0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
					0x00, /* OUTER_IPV6 | OUTER_IPV4 */
					0x00, /* OUTER_IPV6 | OUTER_IPV4 |
					       * OUTER_IP_CKSUM
					       */
					0x00, /* OUTER_UDP_CKSUM */
					0x33, /* OUTER_UDP_CKSUM |
					       * OUTER_IP_CKSUM
					       */
					0x32, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV4
					       */
					0x33, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV4 | OUTER_IP_CKSUM
					       */
					0x34, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV6
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IP_CKSUM
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IPV4
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IPV4 | OUTER_IP_CKSUM
					       */
				},
			}};

			/* Extract olflags to translate to oltype & iltype */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
			 * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
			 */
			const uint32x4_t tshft_4 = {
				1,
				0,
				1,
				0,
			};
			senddesc01_w1 = vshlq_u32(senddesc01_w1, tshft_4);
			senddesc23_w1 = vshlq_u32(senddesc23_w1, tshft_4);

			/*
			 * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
			 * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
			 */
			const uint8x16_t shuf_mask5 = {
				0x6, 0x5, 0x0, 0x1, 0xFF, 0xFF, 0xFF, 0xFF,
				0xE, 0xD, 0x8, 0x9, 0xFF, 0xFF, 0xFF, 0xFF,
			};
			senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
			senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);

			/* Extract outer and inner header ol_flags */
			const uint64x2_t oi_cksum_mask = {
				0x1CF0020000000000,
				0x1CF0020000000000,
			};

			xtmp128 = vandq_u64(xtmp128, oi_cksum_mask);
			ytmp128 = vandq_u64(ytmp128, oi_cksum_mask);

			/* Extract OUTER_UDP_CKSUM bit 41 and
			 * move it to bit 61
			 */

			xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
			ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);

			/* Shift right oltype by 2 and iltype by 4
			 * to start oltype nibble from BIT(58)
			 * instead of BIT(56) and iltype nibble from BIT(48)
			 * instead of BIT(52).
			 */
			const int8x16_t tshft5 = {
				8, 8, 8, 8, 8, 8, -4, -2,
				8, 8, 8, 8, 8, 8, -4, -2,
			};

			xtmp128 = vshlq_u8(xtmp128, tshft5);
			ytmp128 = vshlq_u8(ytmp128, tshft5);
			/*
			 * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
			 * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
			 */
			const int8x16_t tshft3 = {
				-1, 0, -1, 0, 0, 0, 0, 0,
				-1, 0, -1, 0, 0, 0, 0, 0,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Mark Bit(4) of oltype */
			const uint64x2_t oi_cksum_mask2 = {
				0x1000000000000000,
				0x1000000000000000,
			};

			xtmp128 = vorrq_u64(xtmp128, oi_cksum_mask2);
			ytmp128 = vorrq_u64(ytmp128, oi_cksum_mask2);

			/* Do the lookup */
			ltypes01 = vqtbl2q_u8(tbl, xtmp128);
			ltypes23 = vqtbl2q_u8(tbl, ytmp128);

			/* Pick only relevant fields i.e Bit 48:55 of iltype and
			 * Bit 56:63 of oltype and place it in corresponding
			 * place in senddesc_w1.
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0x6, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xE, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare l4ptr, l3ptr, ol4ptr, ol3ptr from
			 * l3len, l2len, ol3len, ol2len.
			 * a [E(32):L3(8):L2(8):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E:(L3+L2):(L2+OL3):(OL3+OL2):OL2]
			 * a = a + (a << 16)
			 * a [E:(L3+L2+OL3+OL2):(L2+OL3+OL2):(OL3+OL2):OL2]
			 * => E(32):IL4PTR(8):IL3PTR(8):OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u32(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u32(senddesc23_w1, 8));

			/* Continue preparing l4ptr, l3ptr, ol4ptr, ol3ptr */
			senddesc01_w1 = vaddq_u8(
				senddesc01_w1, vshlq_n_u32(senddesc01_w1, 16));
			senddesc23_w1 = vaddq_u8(
				senddesc23_w1, vshlq_n_u32(senddesc23_w1, 16));
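
			/* Illustrative example of the two adds above: with
			 * (l3:l2:ol3:ol2) = (20:14:20:14) the running sums
			 * yield (68:48:34:14), i.e. IL4PTR = 68, IL3PTR = 48,
			 * OL4PTR = 34 and OL3PTR = 14.
			 */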
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
		}
		/* Load pool_id of each mbuf and shift it into the aura
		 * field of SEND_HDR_W0.
		 */
		xmask01 = vdupq_n_u64(0);
		xmask23 = xmask01;
		asm volatile("LD1 {%[a].H}[0],[%[in]]\n\t"
			     : [a] "+w"(xmask01)
			     : [in] "r"(mbuf0)
			     : "memory");

		asm volatile("LD1 {%[a].H}[4],[%[in]]\n\t"
			     : [a] "+w"(xmask01)
			     : [in] "r"(mbuf1)
			     : "memory");

		asm volatile("LD1 {%[b].H}[0],[%[in]]\n\t"
			     : [b] "+w"(xmask23)
			     : [in] "r"(mbuf2)
			     : "memory");

		asm volatile("LD1 {%[b].H}[4],[%[in]]\n\t"
			     : [b] "+w"(xmask23)
			     : [in] "r"(mbuf3)
			     : "memory");
		xmask01 = vshlq_n_u64(xmask01, 20);
		xmask23 = vshlq_n_u64(xmask23, 20);

		senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
		senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			/* Set don't free bit if reference count > 1 */
			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;

			/* Move mbufs to iova */
			mbuf0 = (uint64_t *)tx_pkts[0];
			mbuf1 = (uint64_t *)tx_pkts[1];
			mbuf2 = (uint64_t *)tx_pkts[2];
			mbuf3 = (uint64_t *)tx_pkts[3];

			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 0);
			else
				__mempool_check_cookies(
					((struct rte_mbuf *)mbuf0)->pool,
					(void **)&mbuf0, 1, 0);

			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 1);
			else
				__mempool_check_cookies(
					((struct rte_mbuf *)mbuf1)->pool,
					(void **)&mbuf1, 1, 0);

			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 0);
			else
				__mempool_check_cookies(
					((struct rte_mbuf *)mbuf2)->pool,
					(void **)&mbuf2, 1, 0);

			if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 1);
			else
				__mempool_check_cookies(
					((struct rte_mbuf *)mbuf3)->pool,
					(void **)&mbuf3, 1, 0);
			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Ensure mbuf fields which got updated in
			 * cnxk_nix_prefree_seg are written before LMTST.
			 */
			rte_io_wmb();
		} else {
			/* Move mbufs to iova */
			mbuf0 = (uint64_t *)tx_pkts[0];
			mbuf1 = (uint64_t *)tx_pkts[1];
			mbuf2 = (uint64_t *)tx_pkts[2];
			mbuf3 = (uint64_t *)tx_pkts[3];

			/* Mark mempool object as "put" since
			 * it is freed by NIX
			 */
			__mempool_check_cookies(
				((struct rte_mbuf *)mbuf0)->pool,
				(void **)&mbuf0, 1, 0);

			__mempool_check_cookies(
				((struct rte_mbuf *)mbuf1)->pool,
				(void **)&mbuf1, 1, 0);

			__mempool_check_cookies(
				((struct rte_mbuf *)mbuf2)->pool,
				(void **)&mbuf2, 1, 0);

			__mempool_check_cookies(
				((struct rte_mbuf *)mbuf3)->pool,
				(void **)&mbuf3, 1, 0);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
			rte_io_wmb();
#endif
		}
		/* Create 4W cmd for 4 mbufs (sendhdr, sgdesc) */
		cmd0[0] = vzip1q_u64(senddesc01_w0, senddesc01_w1);
		cmd0[1] = vzip2q_u64(senddesc01_w0, senddesc01_w1);
		cmd0[2] = vzip1q_u64(senddesc23_w0, senddesc23_w1);
		cmd0[3] = vzip2q_u64(senddesc23_w0, senddesc23_w1);

		cmd1[0] = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
		cmd1[1] = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
		cmd1[2] = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
		cmd1[3] = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
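
		/* Illustrative LMT line layout produced by the stores below:
		 * dwords are written as hdr0|sg0|hdr1|sg1|hdr2|sg2|hdr3|sg3,
		 * i.e. four 2-dword SEND_HDR + 2-dword SG command pairs per
		 * 128B LMTST burst.
		 */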
		do {
			vst1q_u64(lmt_addr, cmd0[0]);
			vst1q_u64(lmt_addr + 2, cmd1[0]);
			vst1q_u64(lmt_addr + 4, cmd0[1]);
			vst1q_u64(lmt_addr + 6, cmd1[1]);
			vst1q_u64(lmt_addr + 8, cmd0[2]);
			vst1q_u64(lmt_addr + 10, cmd1[2]);
			vst1q_u64(lmt_addr + 12, cmd0[3]);
			vst1q_u64(lmt_addr + 14, cmd1[3]);
			lmt_status = roc_lmt_submit_ldeor(io_addr);
		} while (lmt_status == 0);
		tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
	}

	if (unlikely(pkts_left))
		pkts += cn9k_nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd,
					   flags);

	return pkts;
}

#else
static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	RTE_SET_USED(tx_queue);
	RTE_SET_USED(tx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(cmd);
	RTE_SET_USED(flags);
	return 0;
}
#endif
#define L3L4CSUM_F   NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F	     NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F	     NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSO_F	     NIX_TX_OFFLOAD_TSO_F
/* [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
#define NIX_TX_FASTPATH_MODES                                                  \
T(no_offload,				0, 0, 0, 0, 0, 4,                      \
	NIX_TX_OFFLOAD_NONE)                                                   \
T(l3l4csum,				0, 0, 0, 0, 1, 4,                      \
	L3L4CSUM_F)                                                            \
T(ol3ol4csum,				0, 0, 0, 1, 0, 4,                      \
	OL3OL4CSUM_F)                                                          \
T(ol3ol4csum_l3l4csum,			0, 0, 0, 1, 1, 4,                      \
	OL3OL4CSUM_F | L3L4CSUM_F)                                             \
T(vlan,					0, 0, 1, 0, 0, 6,                      \
	VLAN_F)                                                                \
T(vlan_l3l4csum,			0, 0, 1, 0, 1, 6,                      \
	VLAN_F | L3L4CSUM_F)                                                   \
T(vlan_ol3ol4csum,			0, 0, 1, 1, 0, 6,                      \
	VLAN_F | OL3OL4CSUM_F)                                                 \
T(vlan_ol3ol4csum_l3l4csum,		0, 0, 1, 1, 1, 6,                      \
	VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)                                    \
T(noff,					0, 1, 0, 0, 0, 4,                      \
	NOFF_F)                                                                \
T(noff_l3l4csum,			0, 1, 0, 0, 1, 4,                      \
	NOFF_F | L3L4CSUM_F)                                                   \
T(noff_ol3ol4csum,			0, 1, 0, 1, 0, 4,                      \
	NOFF_F | OL3OL4CSUM_F)                                                 \
T(noff_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 1, 4,                      \
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)                                    \
T(noff_vlan,				0, 1, 1, 0, 0, 6,                      \
	NOFF_F | VLAN_F)                                                       \
T(noff_vlan_l3l4csum,			0, 1, 1, 0, 1, 6,                      \
	NOFF_F | VLAN_F | L3L4CSUM_F)                                          \
T(noff_vlan_ol3ol4csum,			0, 1, 1, 1, 0, 6,                      \
	NOFF_F | VLAN_F | OL3OL4CSUM_F)                                        \
T(noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1, 6,                      \
	NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)                           \
T(tso,					1, 0, 0, 0, 0, 6,                      \
	TSO_F)                                                                 \
T(tso_l3l4csum,				1, 0, 0, 0, 1, 6,                      \
	TSO_F | L3L4CSUM_F)                                                    \
T(tso_ol3ol4csum,			1, 0, 0, 1, 0, 6,                      \
	TSO_F | OL3OL4CSUM_F)                                                  \
T(tso_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 1, 6,                      \
	TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)                                     \
T(tso_vlan,				1, 0, 1, 0, 0, 6,                      \
	TSO_F | VLAN_F)                                                        \
T(tso_vlan_l3l4csum,			1, 0, 1, 0, 1, 6,                      \
	TSO_F | VLAN_F | L3L4CSUM_F)                                           \
T(tso_vlan_ol3ol4csum,			1, 0, 1, 1, 0, 6,                      \
	TSO_F | VLAN_F | OL3OL4CSUM_F)                                         \
T(tso_vlan_ol3ol4csum_l3l4csum,		1, 0, 1, 1, 1, 6,                      \
	TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)                            \
T(tso_noff,				1, 1, 0, 0, 0, 6,                      \
	TSO_F | NOFF_F)                                                        \
T(tso_noff_l3l4csum,			1, 1, 0, 0, 1, 6,                      \
	TSO_F | NOFF_F | L3L4CSUM_F)                                           \
T(tso_noff_ol3ol4csum,			1, 1, 0, 1, 0, 6,                      \
	TSO_F | NOFF_F | OL3OL4CSUM_F)                                         \
T(tso_noff_ol3ol4csum_l3l4csum,		1, 1, 0, 1, 1, 6,                      \
	TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)                            \
T(tso_noff_vlan,			1, 1, 1, 0, 0, 6,                      \
	TSO_F | NOFF_F | VLAN_F)                                               \
T(tso_noff_vlan_l3l4csum,		1, 1, 1, 0, 1, 6,                      \
	TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)                                  \
T(tso_noff_vlan_ol3ol4csum,		1, 1, 1, 1, 0, 6,                      \
	TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)                                \
T(tso_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 1, 6,                      \
	TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
#define T(name, f4, f3, f2, f1, f0, sz, flags)                                 \
	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name(           \
		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);     \
                                                                               \
	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_mseg_##name(      \
		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);     \
                                                                               \
	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_vec_##name(       \
		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);

NIX_TX_FASTPATH_MODES
#undef T
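
/* Illustrative expansion (sketch): for the "vlan" row, the T() macro above
 * declares
 *
 *   uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_vlan(
 *           void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
 *
 * plus the matching _mseg_ and _vec_ variants; the definitions elsewhere
 * are assumed to instantiate cn9k_nix_xmit_pkts() and friends with
 * flags = VLAN_F.
 */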
#endif /* __CN9K_TX_H__ */