/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_TX_H__
#define __CN9K_TX_H__

#define NIX_TX_OFFLOAD_NONE	      (0)
#define NIX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(0)
#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
#define NIX_TX_OFFLOAD_VLAN_QINQ_F    BIT(2)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)
#define NIX_TX_OFFLOAD_TSO_F	      BIT(4)

/* Flags to control the xmit_prepare function.
 * Defined from the MSB end so that they are not treated as offload flags
 * when picking the transmit function.
 */
#define NIX_TX_MULTI_SEG_F BIT(15)

#define NIX_TX_NEED_SEND_HDR_W1 \
	(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | \
	 NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)

#define NIX_TX_NEED_EXT_HDR \
	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)

#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
	do { \
		/* Cached value is low, update the fc_cache_pkts */ \
		if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
			/* Multiply with sqe_per_sqb to express in pkts */ \
			(txq)->fc_cache_pkts = \
				((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) \
				<< (txq)->sqes_per_sqb_log2; \
			/* Check it again for the room */ \
			if (unlikely((txq)->fc_cache_pkts < (pkts))) \
				return 0; \
		} \
	} while (0)

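/*
 * Worked example (illustrative numbers, not taken from the driver): with
 * nb_sqb_bufs_adj = 512, *fc_mem = 500 and sqes_per_sqb_log2 = 5, the
 * recomputed cache is (512 - 500) << 5 = 384 packets worth of room. A burst
 * of 64 packets would pass the second check; a burst of 400 would not and
 * the transmit function returns 0 without queueing anything.
 */
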
/* Function to determine the number of extra Tx subdescriptors required when
 * the extension sub descriptor is enabled.
 */
static __rte_always_inline int
cn9k_nix_tx_ext_subs(const uint16_t flags)
{
	return (flags &
		(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ? 1 : 0;
}

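/*
 * For example, a transmit function compiled with the VLAN_QINQ or TSO flag
 * needs the NIX send extension header, so cn9k_nix_tx_ext_subs() returns 1
 * and the LMT copies below move one extra 16B subdescriptor per packet; with
 * only checksum offloads it returns 0 and the command is just the send
 * header plus the SG subdescriptor.
 */
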
static __rte_always_inline void
cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
{
	uint64_t mask, ol_flags = m->ol_flags;

	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
		uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
		uint16_t *iplen, *oiplen, *oudplen;
		uint16_t lso_sb, paylen;

		mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
		lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
			 m->l2_len + m->l3_len + m->l4_len;

		/* Reduce payload len from base headers */
		paylen = m->pkt_len - lso_sb;

		/* Get iplen position assuming no tunnel hdr */
		iplen = (uint16_t *)(mdata + m->l2_len +
				     (2 << !!(ol_flags & PKT_TX_IPV6)));
		/* Handle tunnel tso */
		if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
		    (ol_flags & PKT_TX_TUNNEL_MASK)) {
			const uint8_t is_udp_tun =
				(CNXK_NIX_UDP_TUN_BITMASK >>
				 ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
				0x1;

			oiplen = (uint16_t *)(mdata + m->outer_l2_len +
					      (2 << !!(ol_flags &
						       PKT_TX_OUTER_IPV6)));
			*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
						   paylen);

			/* Update outer UDP header length for UDP tunneled
			 * packet.
			 */
			if (is_udp_tun) {
				oudplen = (uint16_t *)(mdata +
						       m->outer_l2_len +
						       m->outer_l3_len + 4);
				*oudplen = rte_cpu_to_be_16(
					rte_be_to_cpu_16(*oudplen) - paylen);
			}

			/* Update iplen position to inner ip hdr */
			iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
					     m->l4_len +
					     (2 << !!(ol_flags & PKT_TX_IPV6)));
		}

		*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
	}
}

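/*
 * Worked example (hypothetical packet, not from the driver): a tunneled TCP
 * segment with outer_l2_len = 14, outer_l3_len = 20, l2_len = 30,
 * l3_len = 20 and l4_len = 20 gives lso_sb = 104, so paylen = pkt_len - 104.
 * That paylen is subtracted from the inner IP total-length field (and, for a
 * UDP tunnel, from the outer IP and UDP length fields) so that hardware can
 * regenerate correct per-segment lengths.
 */
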
static __rte_always_inline void
cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
		      const uint64_t lso_tun_fmt)
{
	struct nix_send_ext_s *send_hdr_ext;
	struct nix_send_hdr_s *send_hdr;
	uint64_t ol_flags = 0, mask;
	union nix_send_hdr_w1_u w1;
	union nix_send_sg_s *sg;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	if (flags & NIX_TX_NEED_EXT_HDR) {
		send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
		sg = (union nix_send_sg_s *)(cmd + 4);
		/* Clear previous markings */
		send_hdr_ext->w0.lso = 0;
		send_hdr_ext->w1.u = 0;
	} else {
		sg = (union nix_send_sg_s *)(cmd + 2);
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1) {
		ol_flags = m->ol_flags;
		w1.u = 0;
	}

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		send_hdr->w0.total = m->data_len;
		send_hdr->w0.aura =
			roc_npa_aura_handle_to_aura(m->pool->pool_id);
	}

	/*
	 * L3type:  2 => IPV4
	 *          3 => IPV4 with csum
	 *          4 => IPV6
	 * L3type and L3ptr need to be set for either
	 * L3 csum or L4 csum or LSO
	 */

	if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t ol3type =
			((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L3 */
		w1.ol3type = ol3type;
		mask = 0xffffull << ((!!ol3type) << 4);
		w1.ol3ptr = ~mask & m->outer_l2_len;
		w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

		/* Inner L3 */
		w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			     ((!!(ol_flags & PKT_TX_IPV6)) << 2);
		w1.il3ptr = w1.ol4ptr + m->l2_len;
		w1.il4ptr = w1.il3ptr + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;

		/* If no outer (tunnel) header is present, shift the inner
		 * IL3/IL4 fields down so that OL3/OL4 are used for the
		 * header checksum instead.
		 */
		mask = !ol3type;
		w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |
		       ((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));

	} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		const uint8_t outer_l2_len = m->outer_l2_len;

		/* Outer L3 */
		w1.ol3ptr = outer_l2_len;
		w1.ol4ptr = outer_l2_len + m->outer_l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
			     ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
			     !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);

		/* Outer L4 */
		w1.ol4type = csum + (csum << 1);

	} else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {
		const uint8_t l2_len = m->l2_len;

		/* Always use OLXPTR and OLXTYPE when only
		 * one header is present.
		 */

		/* Inner L3 */
		w1.ol3ptr = l2_len;
		w1.ol4ptr = l2_len + m->l3_len;
		/* Increment it by 1 if it is IPV4 as 3 is with csum */
		w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
			     ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
			     !!(ol_flags & PKT_TX_IP_CKSUM);

		/* Inner L4 */
		w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
	}

	if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
		/* HW will update ptr after vlan0 update */
		send_hdr_ext->w1.vlan1_ins_ptr = 12;
		send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;

		send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
		/* 2B before end of l2 header */
		send_hdr_ext->w1.vlan0_ins_ptr = 12;
		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
	}

	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
		uint16_t lso_sb;

		mask = -(!w1.il3type);
		lso_sb = (mask & w1.ol4ptr) + (~mask & w1.il4ptr) + m->l4_len;

		send_hdr_ext->w0.lso_sb = lso_sb;
		send_hdr_ext->w0.lso = 1;
		send_hdr_ext->w0.lso_mps = m->tso_segsz;
		send_hdr_ext->w0.lso_format =
			NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
		w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;

		/* Handle tunnel tso */
		if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
		    (ol_flags & PKT_TX_TUNNEL_MASK)) {
			const uint8_t is_udp_tun =
				(CNXK_NIX_UDP_TUN_BITMASK >>
				 ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
				0x1;
			uint8_t shift = is_udp_tun ? 32 : 0;

			shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
			shift += (!!(ol_flags & PKT_TX_IPV6) << 3);

			w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
			w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
			/* Update format for UDP tunneled packet */
			send_hdr_ext->w0.lso_format = (lso_tun_fmt >> shift);
		}
	}

	if (flags & NIX_TX_NEED_SEND_HDR_W1)
		send_hdr->w1.u = w1.u;

	if (!(flags & NIX_TX_MULTI_SEG_F)) {
		sg->seg1_size = m->data_len;
		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);

		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			/* DF bit = 1 if refcount of current mbuf or parent
			 * mbuf is greater than 1
			 * DF bit = 0 otherwise
			 */
			send_hdr->w0.df = cnxk_nix_prefree_seg(m);
			/* Ensuring mbuf fields which got updated in
			 * cnxk_nix_prefree_seg are written before LMTST.
			 */
			rte_io_wmb();
		}
		/* Mark mempool object as "put" since it is freed by NIX */
		if (!send_hdr->w0.df)
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
	}
}

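/*
 * Illustrative layout of the command buffer built above (a sketch implied by
 * the pointer arithmetic, not an extra hardware specification): with the
 * extension header enabled, cmd[0..1] hold nix_send_hdr_s, cmd[2..3] hold
 * nix_send_ext_s and cmd[4..5] hold the SG subdescriptor plus the buffer
 * IOVA; without it, the SG pair starts at cmd[2]. The LMT copy in
 * cn9k_nix_xmit_one() below therefore moves two or three 16B units per
 * packet, as selected by cn9k_nix_tx_ext_subs().
 */
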
static __rte_always_inline void
cn9k_nix_xmit_one(uint64_t *cmd, void *lmt_addr, const rte_iova_t io_addr,
		  const uint32_t flags)
{
	uint64_t lmt_status;

	do {
		roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}

static __rte_always_inline void
cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags)
{
	roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags));
}

static __rte_always_inline uint64_t
cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr)
{
	return roc_lmt_submit_ldeor(io_addr);
}

static __rte_always_inline uint64_t
cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)
{
	return roc_lmt_submit_ldeorl(io_addr);
}

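/*
 * Illustrative use of the split prep/submit helpers (a sketch, not a path
 * lifted verbatim from this file): callers that want to interleave other
 * work between the LMT copy and the doorbell can mirror cn9k_nix_xmit_one():
 *
 *	do {
 *		cn9k_nix_xmit_prep_lmt(cmd, lmt_addr, flags);
 *	} while (cn9k_nix_xmit_submit_lmt(io_addr) == 0);
 *
 * The copy must be redone on every retry because a zero LDEOR result means
 * the LMTST data was not consumed by hardware.
 */
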
static __rte_always_inline uint16_t
cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
	struct nix_send_hdr_s *send_hdr;
	union nix_send_sg_s *sg;
	struct rte_mbuf *m_next;
	uint64_t *slist, sg_u;
	uint64_t nb_segs, segdw;
	uint8_t off, i;

	send_hdr = (struct nix_send_hdr_s *)cmd;
	send_hdr->w0.total = m->pkt_len;
	send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);

	if (flags & NIX_TX_NEED_EXT_HDR)
		off = 2;
	else
		off = 0;

	sg = (union nix_send_sg_s *)&cmd[2 + off];
	/* Clear sg->u header before use */
	sg->u &= 0xFC00000000000000;
	sg_u = sg->u;
	slist = &cmd[3 + off];

	i = 0;
	nb_segs = m->nb_segs;

	/* Fill mbuf segments */
	do {
		m_next = m->next;
		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
		*slist = rte_mbuf_data_iova(m);
		/* Set invert df if buffer is not to be freed by H/W */
		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55));
			/* Commit changes to mbuf */
			rte_io_wmb();
		}
		/* Mark mempool object as "put" since it is freed by NIX */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		if (!(sg_u & (1ULL << (i + 55))))
			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
#endif
		slist++;
		i++;
		nb_segs--;
		if (i > 2 && nb_segs) {
			i = 0;
			/* Next SG subdesc */
			*(uint64_t *)slist = sg_u & 0xFC00000000000000;
			sg->u = sg_u;
			sg->segs = 3;
			sg = (union nix_send_sg_s *)slist;
			sg_u = sg->u;
			slist++;
		}
		m = m_next;
	} while (nb_segs);

	sg->u = sg_u;
	sg->segs = i;
	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
	/* Roundup extra dwords to multiple of 2 */
	segdw = (segdw >> 1) + (segdw & 0x1);
	/* Add send header (and ext header) dwords */
	segdw += (off >> 1) + 1;
	send_hdr->w0.sizem1 = segdw - 1;

	return segdw;
}

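/*
 * Worked example (hypothetical 3-segment mbuf chain with the extension
 * header enabled, so off = 2): the loop leaves one SG header dword at cmd[4]
 * and three IOVA dwords at cmd[5..7], so slist ends 4 dwords past
 * &cmd[2 + off]. Rounding up gives (4 >> 1) + (4 & 1) = 2 units of 16B, and
 * adding (off >> 1) + 1 = 2 for the send and extension headers yields
 * segdw = 4, i.e. a 64B command with sizem1 = 3.
 */
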
static __rte_always_inline void
cn9k_nix_xmit_mseg_prep_lmt(uint64_t *cmd, void *lmt_addr, uint16_t segdw)
{
	roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
}

static __rte_always_inline void
cn9k_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr, rte_iova_t io_addr,
		       uint16_t segdw)
{
	uint64_t lmt_status;

	do {
		roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}

static __rte_always_inline void
cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
			       rte_iova_t io_addr, uint16_t segdw)
{
	uint64_t lmt_status;

	rte_io_wmb();
	do {
		roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}

static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
		   uint64_t *cmd, const uint16_t flags)
{
	struct cn9k_eth_txq *txq = tx_queue;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;
	uint64_t lso_tun_fmt;
	uint16_t i;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		lso_tun_fmt = txq->lso_tun_fmt;
		for (i = 0; i < pkts; i++)
			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Commit any changes to the packets here, as no further changes
	 * are made to them unless no-fast-free (MBUF_NOFF) is enabled.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	for (i = 0; i < pkts; i++) {
		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
		cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}

static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	struct cn9k_eth_txq *txq = tx_queue;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;
	uint64_t lso_tun_fmt;
	uint16_t segdw;
	uint16_t i;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		lso_tun_fmt = txq->lso_tun_fmt;
		for (i = 0; i < pkts; i++)
			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Commit any changes to the packets here, as no further changes
	 * are made to them unless no-fast-free (MBUF_NOFF) is enabled.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	for (i = 0; i < pkts; i++) {
		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
		segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
		cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}

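/*
 * Minimal usage sketch (hypothetical caller, not part of this header): for
 * one chained mbuf, the multi-segment path follows the same steps the loop
 * above performs per packet:
 *
 *	uint64_t cmd[8];
 *	uint16_t segdw;
 *
 *	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
 *	segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
 *	cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr, segdw);
 *
 * cmd[] must be large enough for the send, extension and SG subdescriptors
 * of the longest supported chain; the burst functions reuse the caller's
 * 'cmd' scratch buffer for every packet.
 */
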
#define L3L4CSUM_F   NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F       NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F       NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSO_F        NIX_TX_OFFLOAD_TSO_F

/* [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
#define NIX_TX_FASTPATH_MODES \
T(no_offload, 0, 0, 0, 0, 0, 4, \
	NIX_TX_OFFLOAD_NONE) \
T(l3l4csum, 0, 0, 0, 0, 1, 4, \
	L3L4CSUM_F) \
T(ol3ol4csum, 0, 0, 0, 1, 0, 4, \
	OL3OL4CSUM_F) \
T(ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 4, \
	OL3OL4CSUM_F | L3L4CSUM_F) \
T(vlan, 0, 0, 1, 0, 0, 6, \
	VLAN_F) \
T(vlan_l3l4csum, 0, 0, 1, 0, 1, 6, \
	VLAN_F | L3L4CSUM_F) \
T(vlan_ol3ol4csum, 0, 0, 1, 1, 0, 6, \
	VLAN_F | OL3OL4CSUM_F) \
T(vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 6, \
	VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(noff, 0, 1, 0, 0, 0, 4, \
	NOFF_F) \
T(noff_l3l4csum, 0, 1, 0, 0, 1, 4, \
	NOFF_F | L3L4CSUM_F) \
T(noff_ol3ol4csum, 0, 1, 0, 1, 0, 4, \
	NOFF_F | OL3OL4CSUM_F) \
T(noff_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 4, \
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(noff_vlan, 0, 1, 1, 0, 0, 6, \
	NOFF_F | VLAN_F) \
T(noff_vlan_l3l4csum, 0, 1, 1, 0, 1, 6, \
	NOFF_F | VLAN_F | L3L4CSUM_F) \
T(noff_vlan_ol3ol4csum, 0, 1, 1, 1, 0, 6, \
	NOFF_F | VLAN_F | OL3OL4CSUM_F) \
T(noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 6, \
	NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(tso, 1, 0, 0, 0, 0, 6, \
	TSO_F) \
T(tso_l3l4csum, 1, 0, 0, 0, 1, 6, \
	TSO_F | L3L4CSUM_F) \
T(tso_ol3ol4csum, 1, 0, 0, 1, 0, 6, \
	TSO_F | OL3OL4CSUM_F) \
T(tso_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 6, \
	TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(tso_vlan, 1, 0, 1, 0, 0, 6, \
	TSO_F | VLAN_F) \
T(tso_vlan_l3l4csum, 1, 0, 1, 0, 1, 6, \
	TSO_F | VLAN_F | L3L4CSUM_F) \
T(tso_vlan_ol3ol4csum, 1, 0, 1, 1, 0, 6, \
	TSO_F | VLAN_F | OL3OL4CSUM_F) \
T(tso_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 6, \
	TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(tso_noff, 1, 1, 0, 0, 0, 6, \
	TSO_F | NOFF_F) \
T(tso_noff_l3l4csum, 1, 1, 0, 0, 1, 6, \
	TSO_F | NOFF_F | L3L4CSUM_F) \
T(tso_noff_ol3ol4csum, 1, 1, 0, 1, 0, 6, \
	TSO_F | NOFF_F | OL3OL4CSUM_F) \
T(tso_noff_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 6, \
	TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(tso_noff_vlan, 1, 1, 1, 0, 0, 6, \
	TSO_F | NOFF_F | VLAN_F) \
T(tso_noff_vlan_l3l4csum, 1, 1, 1, 0, 1, 6, \
	TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
T(tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 0, 6, \
	TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
T(tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 6, \
	TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)

#define T(name, f4, f3, f2, f1, f0, sz, flags) \
	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name( \
		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
	\
	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_mseg_##name( \
		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);

NIX_TX_FASTPATH_MODES
#undef T

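/*
 * For illustration (the expansion implied by the macros above, not extra
 * declarations): the T(no_offload, ...) row expands to
 *
 *	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_no_offload(
 *		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
 *	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_mseg_no_offload(
 *		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
 *
 * One such pair is declared per offload combination; the matching
 * definitions are expected to wrap cn9k_nix_xmit_pkts()/..._mseg() with the
 * row's flag mask as the compile-time 'flags' argument.
 */
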
#endif /* __CN9K_TX_H__ */