#include <rte_vect.h>
+#include <rte_eventdev.h>
+
#define NIX_TX_OFFLOAD_NONE (0)
#define NIX_TX_OFFLOAD_L3_L4_CSUM_F BIT(0)
#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
static __rte_always_inline int
cn10k_nix_tx_ext_subs(const uint16_t flags)
{
- return (flags & NIX_TX_OFFLOAD_TSTAMP_F)
- ? 2
- : ((flags &
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F))
- ? 1
- : 0);
+ return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ?
+ 2 :
+ ((flags &
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ?
+ 1 :
+ 0);
+}
+
+static __rte_always_inline uint8_t
+cn10k_nix_tx_dwords(const uint16_t flags, const uint8_t segdw)
+{
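+	/* Note: "dwords" here are counted in 16B LMT units (cf. dw << 4 in
+	 * cn10k_nix_lmt_next()): send hdr plus the sg/iova pair take 2 units,
+	 * plus one unit per ext/mem subdescriptor from cn10k_nix_tx_ext_subs().
+	 */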
+ if (!(flags & NIX_TX_MULTI_SEG_F))
+ return cn10k_nix_tx_ext_subs(flags) + 2;
+
+	/* Everything is already accounted for in segdw */
+ return segdw;
}
static __rte_always_inline uint8_t
return data;
}
+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data(void)
+{
+ /* We have two CPT instructions per LMTLine */
+ const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+ uint64_t data;
+
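+	/* Fill (DW count - 1) into each of the sixteen 3-bit size fields,
+	 * one per LMT line, at bits 18:16, 21:19, ... 63:61. With two 64B
+	 * CPT instructions per 128B line, dw_m1 is presumably 8 - 1 = 7
+	 * in 16B units.
+	 */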
+ /* This will be moved to addr area */
+ data = dw_m1 << 16;
+ data |= dw_m1 << 19;
+ data |= dw_m1 << 22;
+ data |= dw_m1 << 25;
+ data |= dw_m1 << 28;
+ data |= dw_m1 << 31;
+ data |= dw_m1 << 34;
+ data |= dw_m1 << 37;
+ data |= dw_m1 << 40;
+ data |= dw_m1 << 43;
+ data |= dw_m1 << 46;
+ data |= dw_m1 << 49;
+ data |= dw_m1 << 52;
+ data |= dw_m1 << 55;
+ data |= dw_m1 << 58;
+ data |= dw_m1 << 61;
+
+ return data;
+}
+
static __rte_always_inline void
cn10k_nix_tx_skeleton(const struct cn10k_eth_txq *txq, uint64_t *cmd,
const uint16_t flags)
cmd[1] = 0;
}
+static __rte_always_inline void
+cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
+ uint8_t loff, uint8_t shft)
+{
+ uint64_t data;
+ uintptr_t pa;
+
+ /* Check if there is any CPT instruction to submit */
+ if (!lnum && !loff)
+ return;
+
+ data = cn10k_cpt_tx_steor_data();
+	/* Update lmtline usage for the partial end line */
+ if (loff) {
+ data &= ~(0x7ULL << shft);
+		/* Update it to half full, i.e., 64B */
+ data |= (0x3UL << shft);
+ }
+
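+	/* First line's size field (bits 18:16) moves into I/O addr bits 6:4 */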
+ pa = io_addr | ((data >> 16) & 0x7) << 4;
+ data &= ~(0x7ULL << 16);
+	/* Record (number of lines - 1) that contain valid data */
+ data |= ((uint64_t)(lnum + loff - 1)) << 12;
+ data |= lmt_id;
+
+ /* STEOR */
+ roc_lmt_submit_steorl(data, pa);
+}
+
+#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline void
+cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
+ uintptr_t *nixtx_addr, uintptr_t lbase, uint8_t *lnum,
+ uint8_t *loff, uint8_t *shft, uint64_t sa_base,
+ const uint16_t flags)
+{
+ struct cn10k_sec_sess_priv sess_priv;
+ uint32_t pkt_len, dlen_adj, rlen;
+ uint64x2_t cmd01, cmd23;
+ uintptr_t dptr, nixtx;
+ uint64_t ucode_cmd[4];
+ uint64_t *laddr;
+ uint8_t l2_len;
+ uint16_t tag;
+ uint64_t sa;
+
+ sess_priv.u64 = *rte_security_dynfield(m);
+
+ if (flags & NIX_TX_NEED_SEND_HDR_W1)
+ l2_len = vgetq_lane_u8(*cmd0, 8);
+ else
+ l2_len = m->l2_len;
+
+ /* Retrieve DPTR */
+ dptr = vgetq_lane_u64(*cmd1, 1);
+ pkt_len = vgetq_lane_u16(*cmd0, 0);
+
+ /* Calculate dlen adj */
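+	/* i.e. rlen = roundup(payload len + roundup_len, roundup_byte) +
+	 * partial_len; dlen_adj is then the extra room the encrypted
+	 * result needs beyond the plain text.
+	 */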
+ dlen_adj = pkt_len - l2_len;
+ rlen = (dlen_adj + sess_priv.roundup_len) +
+ (sess_priv.roundup_byte - 1);
+ rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+ rlen += sess_priv.partial_len;
+ dlen_adj = rlen - dlen_adj;
+
+ /* Update send descriptors. Security is single segment only */
+ *cmd0 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd0, 0);
+ *cmd1 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd1, 0);
+
+ /* Get area where NIX descriptor needs to be stored */
+ nixtx = dptr + pkt_len + dlen_adj;
+ nixtx += BIT_ULL(7);
+ nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
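+	/* Equivalent to RTE_ALIGN_CEIL(dptr + pkt_len + dlen_adj, BIT_ULL(7)) */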
+
+ /* Return nixtx addr */
+ *nixtx_addr = (nixtx + 16);
+
+	/* DLEN passed excludes L2HDR */
+ pkt_len -= l2_len;
+ tag = sa_base & 0xFFFFUL;
+ sa_base &= ~0xFFFFUL;
+ sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[0] =
+ (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | pkt_len);
+
+ /* CPT Word 0 and Word 1 */
+ cmd01 = vdupq_n_u64((nixtx + 16) | (cn10k_nix_tx_ext_subs(flags) + 1));
+ /* CPT_RES_S is 16B above NIXTX */
+ cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+ /* CPT word 2 and 3 */
+ cmd23 = vdupq_n_u64(0);
+ cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) | tag |
+ CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+ cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+ dptr += l2_len;
+ ucode_cmd[1] = dptr;
+ ucode_cmd[2] = dptr;
+
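+	/* Two 64B CPT instructions share each 128B LMT line; odd-numbered
+	 * instructions land in the second half (offset 64).
+	 */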
+ /* Move to our line */
+ laddr = LMT_OFF(lbase, *lnum, *loff ? 64 : 0);
+
+ /* Write CPT instruction to lmt line */
+ vst1q_u64(laddr, cmd01);
+ vst1q_u64((laddr + 2), cmd23);
+
+ *(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+ *(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+ /* Move to next line for every other CPT inst */
+ *loff = !(*loff);
+ *lnum = *lnum + (*loff ? 0 : 1);
+ *shft = *shft + (*loff ? 0 : 3);
+}
+
+static __rte_always_inline void
+cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+ uintptr_t lbase, uint8_t *lnum, uint8_t *loff, uint8_t *shft,
+ uint64_t sa_base, const uint16_t flags)
+{
+ struct cn10k_sec_sess_priv sess_priv;
+ uint32_t pkt_len, dlen_adj, rlen;
+ struct nix_send_hdr_s *send_hdr;
+ uint64x2_t cmd01, cmd23;
+ union nix_send_sg_s *sg;
+ uintptr_t dptr, nixtx;
+ uint64_t ucode_cmd[4];
+ uint64_t *laddr;
+ uint8_t l2_len;
+ uint16_t tag;
+ uint64_t sa;
+
+	/* Retrieve the security session private data */
+ sess_priv.u64 = *rte_security_dynfield(m);
+ send_hdr = (struct nix_send_hdr_s *)cmd;
+ if (flags & NIX_TX_NEED_EXT_HDR)
+ sg = (union nix_send_sg_s *)&cmd[4];
+ else
+ sg = (union nix_send_sg_s *)&cmd[2];
+
+ if (flags & NIX_TX_NEED_SEND_HDR_W1)
+ l2_len = cmd[1] & 0xFF;
+ else
+ l2_len = m->l2_len;
+
+ /* Retrieve DPTR */
+ dptr = *(uint64_t *)(sg + 1);
+ pkt_len = send_hdr->w0.total;
+
+ /* Calculate dlen adj */
+ dlen_adj = pkt_len - l2_len;
+ rlen = (dlen_adj + sess_priv.roundup_len) +
+ (sess_priv.roundup_byte - 1);
+ rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+ rlen += sess_priv.partial_len;
+ dlen_adj = rlen - dlen_adj;
+
+ /* Update send descriptors. Security is single segment only */
+ send_hdr->w0.total = pkt_len + dlen_adj;
+ sg->seg1_size = pkt_len + dlen_adj;
+
+ /* Get area where NIX descriptor needs to be stored */
+ nixtx = dptr + pkt_len + dlen_adj;
+ nixtx += BIT_ULL(7);
+ nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+
+ /* Return nixtx addr */
+ *nixtx_addr = (nixtx + 16);
+
+	/* DLEN passed excludes L2HDR */
+ pkt_len -= l2_len;
+ tag = sa_base & 0xFFFFUL;
+ sa_base &= ~0xFFFFUL;
+ sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[0] =
+ (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | pkt_len);
+
+ /* CPT Word 0 and Word 1. Assume no multi-seg support */
+ cmd01 = vdupq_n_u64((nixtx + 16) | (cn10k_nix_tx_ext_subs(flags) + 1));
+ /* CPT_RES_S is 16B above NIXTX */
+ cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+ /* CPT word 2 and 3 */
+ cmd23 = vdupq_n_u64(0);
+ cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) | tag |
+ CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+ cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+ dptr += l2_len;
+ ucode_cmd[1] = dptr;
+ ucode_cmd[2] = dptr;
+
+ /* Move to our line */
+ laddr = LMT_OFF(lbase, *lnum, *loff ? 64 : 0);
+
+ /* Write CPT instruction to lmt line */
+ vst1q_u64(laddr, cmd01);
+ vst1q_u64((laddr + 2), cmd23);
+
+ *(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+ *(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+ /* Move to next line for every other CPT inst */
+ *loff = !(*loff);
+ *lnum = *lnum + (*loff ? 0 : 1);
+ *shft = *shft + (*loff ? 0 : 3);
+}
+
+#else
+
+static __rte_always_inline void
+cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+ uintptr_t lbase, uint8_t *lnum, uint8_t *loff, uint8_t *shft,
+ uint64_t sa_base, const uint16_t flags)
+{
+ RTE_SET_USED(m);
+ RTE_SET_USED(cmd);
+ RTE_SET_USED(nixtx_addr);
+ RTE_SET_USED(lbase);
+ RTE_SET_USED(lnum);
+ RTE_SET_USED(loff);
+ RTE_SET_USED(shft);
+ RTE_SET_USED(sa_base);
+ RTE_SET_USED(flags);
+}
+#endif
+
static __rte_always_inline void
cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
{
}
static __rte_always_inline void
-cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, uintptr_t lmt_addr,
- const uint16_t flags, const uint64_t lso_tun_fmt)
+cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
+ const uint64_t lso_tun_fmt, bool *sec)
{
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
sg = (union nix_send_sg_s *)(cmd + 2);
}
- if (flags & NIX_TX_NEED_SEND_HDR_W1) {
+ if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
ol_flags = m->ol_flags;
w1.u = 0;
}
- if (!(flags & NIX_TX_MULTI_SEG_F)) {
+ if (!(flags & NIX_TX_MULTI_SEG_F))
send_hdr->w0.total = m->data_len;
- send_hdr->w0.aura =
- roc_npa_aura_handle_to_aura(m->pool->pool_id);
- }
+ else
+ send_hdr->w0.total = m->pkt_len;
+ send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
/*
* L3type: 2 => IPV4
send_hdr->w1.u = w1.u;
if (!(flags & NIX_TX_MULTI_SEG_F)) {
- sg->seg1_size = m->data_len;
+ sg->seg1_size = send_hdr->w0.total;
*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ } else {
+ sg->seg1_size = m->data_len;
+ *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+
+ /* NOFF is handled later for multi-seg */
}
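+
+	/* Tell the caller whether this mbuf takes the security path */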
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+ *sec = !!(ol_flags & PKT_TX_SEC_OFFLOAD);
+}
+
+static __rte_always_inline void
+cn10k_nix_xmit_mv_lmt_base(uintptr_t lmt_addr, uint64_t *cmd,
+ const uint16_t flags)
+{
+ struct nix_send_ext_s *send_hdr_ext;
+ union nix_send_sg_s *sg;
+
/* With minimal offloads, 'cmd' being local could be optimized out to
 * registers. In other cases, 'cmd' will be on the stack. The intent is
 * that 'cmd' stores content from txq->cmd, which is copied only once.
*/
- *((struct nix_send_hdr_s *)lmt_addr) = *send_hdr;
+ *((struct nix_send_hdr_s *)lmt_addr) = *(struct nix_send_hdr_s *)cmd;
lmt_addr += 16;
if (flags & NIX_TX_NEED_EXT_HDR) {
+ send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
*((struct nix_send_ext_s *)lmt_addr) = *send_hdr_ext;
lmt_addr += 16;
+
+ sg = (union nix_send_sg_s *)(cmd + 4);
+ } else {
+ sg = (union nix_send_sg_s *)(cmd + 2);
}
/* In case of multi-seg, sg template is stored here */
*((union nix_send_sg_s *)lmt_addr) = *sg;
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
struct nix_send_ext_s *send_hdr_ext =
- (struct nix_send_ext_s *)lmt_addr + 16;
+ (struct nix_send_ext_s *)lmt_addr + 16;
uint64_t *lmt = (uint64_t *)lmt_addr;
uint16_t off = (no_segdw - 1) << 1;
struct nix_send_mem_s *send_mem;
uint8_t off, i;
send_hdr = (struct nix_send_hdr_s *)cmd;
- send_hdr->w0.total = m->pkt_len;
- send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
if (flags & NIX_TX_NEED_EXT_HDR)
off = 2;
off = 0;
sg = (union nix_send_sg_s *)&cmd[2 + off];
- /* Clear sg->u header before use */
- sg->u &= 0xFC00000000000000;
+
+	/* Start from the second segment; the first is already in the sg template */
+ i = 1;
sg_u = sg->u;
- slist = &cmd[3 + off];
+ nb_segs = m->nb_segs - 1;
+ m_next = m->next;
+ slist = &cmd[3 + off + 1];
+
+ /* Set invert df if buffer is not to be freed by H/W */
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+ sg_u |= (cnxk_nix_prefree_seg(m) << 55);
- i = 0;
- nb_segs = m->nb_segs;
+ /* Mark mempool object as "put" since it is freed by NIX */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(sg_u & (1ULL << 55)))
+ __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ rte_io_wmb();
+#endif
+ m = m_next;
+ if (!m)
+ goto done;
/* Fill mbuf segments */
do {
m = m_next;
} while (nb_segs);
+done:
sg->u = sg_u;
sg->segs = i;
segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
{
struct cn10k_eth_txq *txq = tx_queue;
const rte_iova_t io_addr = txq->io_addr;
- uintptr_t pa, lmt_addr = txq->lmt_base;
+ uint8_t lnum, c_lnum, c_shft, c_loff;
+ uintptr_t pa, lbase = txq->lmt_base;
uint16_t lmt_id, burst, left, i;
+ uintptr_t c_lbase = lbase;
+ rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
+ uint16_t c_lmt_id;
+ uint64_t sa_base;
+ uintptr_t laddr;
uint64_t data;
+ bool sec;
if (!(flags & NIX_TX_VWQE_F)) {
NIX_XMIT_FC_OR_RETURN(txq, pkts);
lso_tun_fmt = txq->lso_tun_fmt;
/* Get LMT base address and LMT ID as lcore id */
- ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+ c_io_addr = txq->cpt_io_addr;
+ sa_base = txq->sa_base;
+ }
+
left = pkts;
again:
burst = left > 32 ? 32 : left;
+
+ lnum = 0;
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ c_lnum = 0;
+ c_loff = 0;
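+		/* Size fields in the CPT STEOR data word start at bit 16 */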
+ c_shft = 16;
+ }
+
for (i = 0; i < burst; i++) {
/* Perform header writes for TSO, barrier at
* lmt steorl will suffice.
if (flags & NIX_TX_OFFLOAD_TSO_F)
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
- cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
- lso_tun_fmt);
- cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
+ cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+ &sec);
+
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+
+ /* Prepare CPT instruction and get nixtx addr */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ cn10k_nix_prep_sec(tx_pkts[i], cmd, &laddr, c_lbase,
+ &c_lnum, &c_loff, &c_shft, sa_base,
+ flags);
+
+ /* Move NIX desc to LMT/NIXTX area */
+ cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+ cn10k_nix_xmit_prepare_tstamp(laddr, &txq->cmd[0],
tx_pkts[i]->ol_flags, 4, flags);
- lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
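+		/* Secure pkts' NIX descriptors were placed in the CPT NIXTX
+		 * area, so they consume no NIX LMT line.
+		 */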
+ if (!(flags & NIX_TX_OFFLOAD_SECURITY_F) || !sec)
+ lnum++;
}
if (flags & NIX_TX_VWQE_F)
roc_sso_hws_head_wait(base);
+ left -= burst;
+ tx_pkts += burst;
+
+ /* Submit CPT instructions if any */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		/* Exclude pkts diverted to CPT from the NIX LMTST burst */
+ burst -= ((c_lnum << 1) + c_loff);
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+ c_shft);
+ }
+
/* Trigger LMTST */
if (burst > 16) {
data = cn10k_nix_tx_steor_data(flags);
roc_lmt_submit_steorl(data, pa);
}
- left -= burst;
rte_io_wmb();
- if (left) {
- /* Start processing another burst */
- tx_pkts += burst;
- /* Reset lmt base addr */
- lmt_addr -= (1ULL << ROC_LMT_LINE_SIZE_LOG2);
- lmt_addr &= (~(BIT_ULL(ROC_LMT_BASE_PER_CORE_LOG2) - 1));
+ if (left)
goto again;
- }
return pkts;
}
const uint16_t flags)
{
struct cn10k_eth_txq *txq = tx_queue;
- uintptr_t pa0, pa1, lmt_addr = txq->lmt_base;
+ uintptr_t pa0, pa1, lbase = txq->lmt_base;
const rte_iova_t io_addr = txq->io_addr;
uint16_t segdw, lmt_id, burst, left, i;
+ uint8_t lnum, c_lnum, c_loff;
+ uintptr_t c_lbase = lbase;
uint64_t data0, data1;
+ rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
+ uint8_t shft, c_shft;
__uint128_t data128;
- uint16_t shft;
+ uint16_t c_lmt_id;
+ uint64_t sa_base;
+ uintptr_t laddr;
+ bool sec;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
lso_tun_fmt = txq->lso_tun_fmt;
/* Get LMT base address and LMT ID as lcore id */
- ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+ c_io_addr = txq->cpt_io_addr;
+ sa_base = txq->sa_base;
+ }
+
left = pkts;
again:
burst = left > 32 ? 32 : left;
shft = 16;
data128 = 0;
+
+ lnum = 0;
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ c_lnum = 0;
+ c_loff = 0;
+ c_shft = 16;
+ }
+
for (i = 0; i < burst; i++) {
/* Perform header writes for TSO, barrier at
* lmt steorl will suffice.
if (flags & NIX_TX_OFFLOAD_TSO_F)
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
- cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
- lso_tun_fmt);
+ cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+ &sec);
+
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+
+ /* Prepare CPT instruction and get nixtx addr */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ cn10k_nix_prep_sec(tx_pkts[i], cmd, &laddr, c_lbase,
+ &c_lnum, &c_loff, &c_shft, sa_base,
+ flags);
+
+ /* Move NIX desc to LMT/NIXTX area */
+ cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+
/* Store sg list directly on lmt line */
- segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)lmt_addr,
+ segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)laddr,
flags);
- cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
+ cn10k_nix_xmit_prepare_tstamp(laddr, &txq->cmd[0],
tx_pkts[i]->ol_flags, segdw,
flags);
- lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
- data128 |= (((__uint128_t)(segdw - 1)) << shft);
- shft += 3;
+ if (!(flags & NIX_TX_OFFLOAD_SECURITY_F) || !sec) {
+ lnum++;
+ data128 |= (((__uint128_t)(segdw - 1)) << shft);
+ shft += 3;
+ }
}
if (flags & NIX_TX_VWQE_F)
roc_sso_hws_head_wait(base);
+ left -= burst;
+ tx_pkts += burst;
+
+ /* Submit CPT instructions if any */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		/* Exclude pkts diverted to CPT from the NIX LMTST burst */
+ burst -= ((c_lnum << 1) + c_loff);
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+ c_shft);
+ }
+
data0 = (uint64_t)data128;
data1 = (uint64_t)(data128 >> 64);
/* Make data0 similar to data1 */
roc_lmt_submit_steorl(data0, pa0);
}
- left -= burst;
rte_io_wmb();
- if (left) {
- /* Start processing another burst */
- tx_pkts += burst;
- /* Reset lmt base addr */
- lmt_addr -= (1ULL << ROC_LMT_LINE_SIZE_LOG2);
- lmt_addr &= (~(BIT_ULL(ROC_LMT_BASE_PER_CORE_LOG2) - 1));
+ if (left)
goto again;
- }
return pkts;
}
return lmt_used;
}
+static __rte_always_inline void
+cn10k_nix_lmt_next(uint8_t dw, uintptr_t laddr, uint8_t *lnum, uint8_t *loff,
+ uint8_t *shift, __uint128_t *data128, uintptr_t *next)
+{
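+	/* dw is in 16B units; a full LMT line holds 128B (8 units) */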
+ /* Go to next line if we are out of space */
+ if ((*loff + (dw << 4)) > 128) {
+ *data128 = *data128 |
+ (((__uint128_t)((*loff >> 4) - 1)) << *shift);
+ *shift = *shift + 3;
+ *loff = 0;
+ *lnum = *lnum + 1;
+ }
+
+ *next = (uintptr_t)LMT_OFF(laddr, *lnum, *loff);
+ *loff = *loff + (dw << 4);
+}
+
+static __rte_always_inline void
+cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr,
+ uint64x2_t cmd0, uint64x2_t cmd1, uint64x2_t cmd2,
+ uint64x2_t cmd3, const uint16_t flags)
+{
+ uint8_t off;
+
+ /* Handle no fast free when security is enabled without mseg */
+ if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
+ (flags & NIX_TX_OFFLOAD_SECURITY_F) &&
+ !(flags & NIX_TX_MULTI_SEG_F)) {
+ union nix_send_sg_s sg;
+
+ sg.u = vgetq_lane_u64(cmd1, 0);
+ sg.u |= (cnxk_nix_prefree_seg(mbuf) << 55);
+ cmd1 = vsetq_lane_u64(sg.u, cmd1, 0);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sg.u = vgetq_lane_u64(cmd1, 0);
+ if (!(sg.u & (1ULL << 55)))
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1,
+ 0);
+ rte_io_wmb();
+#endif
+ }
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ if ((flags & NIX_TX_NEED_EXT_HDR) &&
+ (flags & NIX_TX_OFFLOAD_TSTAMP_F)) {
+ cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 48),
+ &cmd0, &cmd1, segdw, flags);
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ off = segdw - 4;
+ off <<= 4;
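+			/* Place SEND_MEM (tstamp) in the last 16B word of
+			 * the segdw-word descriptor.
+			 */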
+ vst1q_u64(LMT_OFF(laddr, 0, 48 + off), cmd3);
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
+ cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 48),
+ &cmd0, &cmd1, segdw, flags);
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ } else {
+ cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 32),
+ &cmd0, &cmd1, segdw, flags);
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1);
+ }
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
+ /* Store the prepared send desc to LMT lines */
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ vst1q_u64(LMT_OFF(laddr, 0, 48), cmd3);
+ } else {
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ }
+ } else {
+ /* Store the prepared send desc to LMT lines */
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1);
+ }
+}
+
static __rte_always_inline uint16_t
cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t pkts, uint64_t *cmd, uintptr_t base,
uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
uint64x2_t cmd0[NIX_DESCS_PER_LOOP], cmd1[NIX_DESCS_PER_LOOP],
cmd2[NIX_DESCS_PER_LOOP], cmd3[NIX_DESCS_PER_LOOP];
+ uint16_t left, scalar, burst, i, lmt_id, c_lmt_id;
uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3, pa;
uint64x2_t senddesc01_w0, senddesc23_w0;
uint64x2_t senddesc01_w1, senddesc23_w1;
- uint16_t left, scalar, burst, i, lmt_id;
uint64x2_t sendext01_w0, sendext23_w0;
uint64x2_t sendext01_w1, sendext23_w1;
uint64x2_t sendmem01_w0, sendmem23_w0;
uint64x2_t sgdesc01_w0, sgdesc23_w0;
uint64x2_t sgdesc01_w1, sgdesc23_w1;
struct cn10k_eth_txq *txq = tx_queue;
- uintptr_t laddr = txq->lmt_base;
rte_iova_t io_addr = txq->io_addr;
+ uintptr_t laddr = txq->lmt_base;
+ uint8_t c_lnum, c_shft, c_loff;
uint64x2_t ltypes01, ltypes23;
uint64x2_t xtmp128, ytmp128;
uint64x2_t xmask01, xmask23;
- uint8_t lnum, shift;
+ uintptr_t c_laddr = laddr;
+ uint8_t lnum, shift, loff;
+ rte_iova_t c_io_addr;
+ uint64_t sa_base;
union wdata {
__uint128_t data128;
uint64_t data[2];
/* Get LMT base address and LMT ID as lcore id */
ROC_LMT_BASE_ID_GET(laddr, lmt_id);
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ ROC_LMT_CPT_BASE_ID_GET(c_laddr, c_lmt_id);
+ c_io_addr = txq->cpt_io_addr;
+ sa_base = txq->sa_base;
+ }
+
left = pkts;
again:
/* Number of packets to prepare depends on offloads enabled. */
burst = left > cn10k_nix_pkts_per_vec_brst(flags) ?
cn10k_nix_pkts_per_vec_brst(flags) :
left;
- if (flags & NIX_TX_MULTI_SEG_F) {
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)) {
wd.data128 = 0;
shift = 16;
}
lnum = 0;
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ loff = 0;
+ c_loff = 0;
+ c_lnum = 0;
+ c_shft = 16;
+ }
for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) {
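+		/* Up to 4 pkts of this iteration may need CPT (2 LMT lines
+		 * at 2 insts/line); flush the burst early if the 16-line
+		 * CPT LMT region would overflow.
+		 */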
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) {
+ burst = i;
+ break;
+ }
+
if (flags & NIX_TX_MULTI_SEG_F) {
uint8_t j;
}
if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
- !(flags & NIX_TX_MULTI_SEG_F)) {
+ !(flags & NIX_TX_MULTI_SEG_F) &&
+ !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
/* Set don't free bit if reference count > 1 */
xmask01 = vdupq_n_u64(0);
xmask23 = xmask01;
(void **)&mbuf3, 1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
- } else if (!(flags & NIX_TX_MULTI_SEG_F)) {
+ } else if (!(flags & NIX_TX_MULTI_SEG_F) &&
+ !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
/* Move mbufs to iova */
mbuf0 = (uint64_t *)tx_pkts[0];
mbuf1 = (uint64_t *)tx_pkts[1];
cmd2[3] = vzip2q_u64(sendext23_w0, sendext23_w1);
}
- if (flags & NIX_TX_MULTI_SEG_F) {
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ const uint64x2_t olf = {PKT_TX_SEC_OFFLOAD,
+ PKT_TX_SEC_OFFLOAD};
+ uintptr_t next;
+ uint8_t dw;
+
+ /* Extract ol_flags. */
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ xtmp128 = vtstq_u64(olf, xtmp128);
+ ytmp128 = vtstq_u64(olf, ytmp128);
+
+ /* Process mbuf0 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[0]);
+ if (vgetq_lane_u64(xtmp128, 0))
+ cn10k_nix_prep_sec_vec(tx_pkts[0], &cmd0[0],
+ &cmd1[0], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf0 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[0], segdw[0], next,
+ cmd0[0], cmd1[0], cmd2[0], cmd3[0],
+ flags);
+
+ /* Process mbuf1 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[1]);
+ if (vgetq_lane_u64(xtmp128, 1))
+ cn10k_nix_prep_sec_vec(tx_pkts[1], &cmd0[1],
+ &cmd1[1], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf1 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[1], segdw[1], next,
+ cmd0[1], cmd1[1], cmd2[1], cmd3[1],
+ flags);
+
+ /* Process mbuf2 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[2]);
+ if (vgetq_lane_u64(ytmp128, 0))
+ cn10k_nix_prep_sec_vec(tx_pkts[2], &cmd0[2],
+ &cmd1[2], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf2 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[2], segdw[2], next,
+ cmd0[2], cmd1[2], cmd2[2], cmd3[2],
+ flags);
+
+ /* Process mbuf3 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[3]);
+ if (vgetq_lane_u64(ytmp128, 1))
+ cn10k_nix_prep_sec_vec(tx_pkts[3], &cmd0[3],
+ &cmd1[3], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf3 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[3], segdw[3], next,
+ cmd0[3], cmd1[3], cmd2[3], cmd3[3],
+ flags);
+
+ } else if (flags & NIX_TX_MULTI_SEG_F) {
uint8_t j;
segdw[4] = 8;
tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
}
- if (flags & NIX_TX_MULTI_SEG_F)
+	/* Round up lnum to cover the last line if it is partially filled */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ lnum = lnum + !!loff;
+ wd.data128 = wd.data128 |
+ (((__uint128_t)(((loff >> 4) - 1) & 0x7) << shift));
+ }
+
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
wd.data[0] >>= 16;
if (flags & NIX_TX_VWQE_F)
roc_sso_hws_head_wait(base);
+ left -= burst;
+
+ /* Submit CPT instructions if any */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+ c_shft);
+
/* Trigger LMTST */
if (lnum > 16) {
- if (!(flags & NIX_TX_MULTI_SEG_F))
+ if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
wd.data[0] = cn10k_nix_tx_steor_vec_data(flags);
pa = io_addr | (wd.data[0] & 0x7) << 4;
wd.data[0] &= ~0x7ULL;
- if (flags & NIX_TX_MULTI_SEG_F)
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
wd.data[0] <<= 16;
wd.data[0] |= (15ULL << 12);
/* STEOR0 */
roc_lmt_submit_steorl(wd.data[0], pa);
- if (!(flags & NIX_TX_MULTI_SEG_F))
+ if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
wd.data[1] = cn10k_nix_tx_steor_vec_data(flags);
pa = io_addr | (wd.data[1] & 0x7) << 4;
wd.data[1] &= ~0x7ULL;
- if (flags & NIX_TX_MULTI_SEG_F)
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
wd.data[1] <<= 16;
wd.data[1] |= ((uint64_t)(lnum - 17)) << 12;
/* STEOR1 */
roc_lmt_submit_steorl(wd.data[1], pa);
} else if (lnum) {
- if (!(flags & NIX_TX_MULTI_SEG_F))
+ if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
wd.data[0] = cn10k_nix_tx_steor_vec_data(flags);
pa = io_addr | (wd.data[0] & 0x7) << 4;
wd.data[0] &= ~0x7ULL;
- if (flags & NIX_TX_MULTI_SEG_F)
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
wd.data[0] <<= 16;
wd.data[0] |= ((uint64_t)(lnum - 1)) << 12;
roc_lmt_submit_steorl(wd.data[0], pa);
}
- left -= burst;
rte_io_wmb();
if (left)
goto again;
#define NOFF_F NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSO_F NIX_TX_OFFLOAD_TSO_F
#define TSP_F NIX_TX_OFFLOAD_TSTAMP_F
+#define T_SEC_F NIX_TX_OFFLOAD_SECURITY_F
-/* [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+/* [T_SEC_F] [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
#define NIX_TX_FASTPATH_MODES \
-T(no_offload, 0, 0, 0, 0, 0, 0, 4, \
+T(no_offload, 0, 0, 0, 0, 0, 0, 0, 4, \
NIX_TX_OFFLOAD_NONE) \
-T(l3l4csum, 0, 0, 0, 0, 0, 1, 4, \
+T(l3l4csum, 0, 0, 0, 0, 0, 0, 1, 4, \
L3L4CSUM_F) \
-T(ol3ol4csum, 0, 0, 0, 0, 1, 0, 4, \
+T(ol3ol4csum, 0, 0, 0, 0, 0, 1, 0, 4, \
OL3OL4CSUM_F) \
-T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 4, \
+T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 0, 1, 1, 4, \
OL3OL4CSUM_F | L3L4CSUM_F) \
-T(vlan, 0, 0, 0, 1, 0, 0, 6, \
+T(vlan, 0, 0, 0, 0, 1, 0, 0, 6, \
VLAN_F) \
-T(vlan_l3l4csum, 0, 0, 0, 1, 0, 1, 6, \
+T(vlan_l3l4csum, 0, 0, 0, 0, 1, 0, 1, 6, \
VLAN_F | L3L4CSUM_F) \
-T(vlan_ol3ol4csum, 0, 0, 0, 1, 1, 0, 6, \
+T(vlan_ol3ol4csum, 0, 0, 0, 0, 1, 1, 0, 6, \
VLAN_F | OL3OL4CSUM_F) \
-T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 6, \
+T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 1, 6, \
VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff, 0, 0, 1, 0, 0, 0, 4, \
+T(noff, 0, 0, 0, 1, 0, 0, 0, 4, \
NOFF_F) \
-T(noff_l3l4csum, 0, 0, 1, 0, 0, 1, 4, \
+T(noff_l3l4csum, 0, 0, 0, 1, 0, 0, 1, 4, \
NOFF_F | L3L4CSUM_F) \
-T(noff_ol3ol4csum, 0, 0, 1, 0, 1, 0, 4, \
+T(noff_ol3ol4csum, 0, 0, 0, 1, 0, 1, 0, 4, \
NOFF_F | OL3OL4CSUM_F) \
-T(noff_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 4, \
+T(noff_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 0, 1, 1, 4, \
NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff_vlan, 0, 0, 1, 1, 0, 0, 6, \
+T(noff_vlan, 0, 0, 0, 1, 1, 0, 0, 6, \
NOFF_F | VLAN_F) \
-T(noff_vlan_l3l4csum, 0, 0, 1, 1, 0, 1, 6, \
+T(noff_vlan_l3l4csum, 0, 0, 0, 1, 1, 0, 1, 6, \
NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 0, 6, \
+T(noff_vlan_ol3ol4csum, 0, 0, 0, 1, 1, 1, 0, 6, \
NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 6, \
+T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 1, 6, \
NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso, 0, 1, 0, 0, 0, 0, 6, \
+T(tso, 0, 0, 1, 0, 0, 0, 0, 6, \
TSO_F) \
-T(tso_l3l4csum, 0, 1, 0, 0, 0, 1, 6, \
+T(tso_l3l4csum, 0, 0, 1, 0, 0, 0, 1, 6, \
TSO_F | L3L4CSUM_F) \
-T(tso_ol3ol4csum, 0, 1, 0, 0, 1, 0, 6, \
+T(tso_ol3ol4csum, 0, 0, 1, 0, 0, 1, 0, 6, \
TSO_F | OL3OL4CSUM_F) \
-T(tso_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 6, \
+T(tso_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 0, 1, 1, 6, \
TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_vlan, 0, 1, 0, 1, 0, 0, 6, \
+T(tso_vlan, 0, 0, 1, 0, 1, 0, 0, 6, \
TSO_F | VLAN_F) \
-T(tso_vlan_l3l4csum, 0, 1, 0, 1, 0, 1, 6, \
+T(tso_vlan_l3l4csum, 0, 0, 1, 0, 1, 0, 1, 6, \
TSO_F | VLAN_F | L3L4CSUM_F) \
-T(tso_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 0, 6, \
+T(tso_vlan_ol3ol4csum, 0, 0, 1, 0, 1, 1, 0, 6, \
TSO_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 6, \
+T(tso_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 1, 6, \
TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff, 0, 1, 1, 0, 0, 0, 6, \
+T(tso_noff, 0, 0, 1, 1, 0, 0, 0, 6, \
TSO_F | NOFF_F) \
-T(tso_noff_l3l4csum, 0, 1, 1, 0, 0, 1, 6, \
+T(tso_noff_l3l4csum, 0, 0, 1, 1, 0, 0, 1, 6, \
TSO_F | NOFF_F | L3L4CSUM_F) \
-T(tso_noff_ol3ol4csum, 0, 1, 1, 0, 1, 0, 6, \
+T(tso_noff_ol3ol4csum, 0, 0, 1, 1, 0, 1, 0, 6, \
TSO_F | NOFF_F | OL3OL4CSUM_F) \
-T(tso_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 6, \
+T(tso_noff_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 0, 1, 1, 6, \
TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff_vlan, 0, 1, 1, 1, 0, 0, 6, \
+T(tso_noff_vlan, 0, 0, 1, 1, 1, 0, 0, 6, \
TSO_F | NOFF_F | VLAN_F) \
-T(tso_noff_vlan_l3l4csum, 0, 1, 1, 1, 0, 1, 6, \
+T(tso_noff_vlan_l3l4csum, 0, 0, 1, 1, 1, 0, 1, 6, \
TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 0, 6, \
+T(tso_noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 1, 0, 6, \
TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 6, \
+T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 1, 6, \
TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts, 1, 0, 0, 0, 0, 0, 8, \
+T(ts, 0, 1, 0, 0, 0, 0, 0, 8, \
TSP_F) \
-T(ts_l3l4csum, 1, 0, 0, 0, 0, 1, 8, \
+T(ts_l3l4csum, 0, 1, 0, 0, 0, 0, 1, 8, \
TSP_F | L3L4CSUM_F) \
-T(ts_ol3ol4csum, 1, 0, 0, 0, 1, 0, 8, \
+T(ts_ol3ol4csum, 0, 1, 0, 0, 0, 1, 0, 8, \
TSP_F | OL3OL4CSUM_F) \
-T(ts_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 8, \
+T(ts_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 0, 1, 1, 8, \
TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_vlan, 1, 0, 0, 1, 0, 0, 8, \
+T(ts_vlan, 0, 1, 0, 0, 1, 0, 0, 8, \
TSP_F | VLAN_F) \
-T(ts_vlan_l3l4csum, 1, 0, 0, 1, 0, 1, 8, \
+T(ts_vlan_l3l4csum, 0, 1, 0, 0, 1, 0, 1, 8, \
TSP_F | VLAN_F | L3L4CSUM_F) \
-T(ts_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 0, 8, \
+T(ts_vlan_ol3ol4csum, 0, 1, 0, 0, 1, 1, 0, 8, \
TSP_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 8, \
+T(ts_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 1, 8, \
TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_noff, 1, 0, 1, 0, 0, 0, 8, \
+T(ts_noff, 0, 1, 0, 1, 0, 0, 0, 8, \
TSP_F | NOFF_F) \
-T(ts_noff_l3l4csum, 1, 0, 1, 0, 0, 1, 8, \
+T(ts_noff_l3l4csum, 0, 1, 0, 1, 0, 0, 1, 8, \
TSP_F | NOFF_F | L3L4CSUM_F) \
-T(ts_noff_ol3ol4csum, 1, 0, 1, 0, 1, 0, 8, \
+T(ts_noff_ol3ol4csum, 0, 1, 0, 1, 0, 1, 0, 8, \
TSP_F | NOFF_F | OL3OL4CSUM_F) \
-T(ts_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 8, \
+T(ts_noff_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 0, 1, 1, 8, \
TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_noff_vlan, 1, 0, 1, 1, 0, 0, 8, \
+T(ts_noff_vlan, 0, 1, 0, 1, 1, 0, 0, 8, \
TSP_F | NOFF_F | VLAN_F) \
-T(ts_noff_vlan_l3l4csum, 1, 0, 1, 1, 0, 1, 8, \
+T(ts_noff_vlan_l3l4csum, 0, 1, 0, 1, 1, 0, 1, 8, \
TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(ts_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 0, 8, \
+T(ts_noff_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 1, 0, 8, \
TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 8, \
+T(ts_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 1, 8, \
TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso, 1, 1, 0, 0, 0, 0, 8, \
+T(ts_tso, 0, 1, 1, 0, 0, 0, 0, 8, \
TSP_F | TSO_F) \
-T(ts_tso_l3l4csum, 1, 1, 0, 0, 0, 1, 8, \
+T(ts_tso_l3l4csum, 0, 1, 1, 0, 0, 0, 1, 8, \
TSP_F | TSO_F | L3L4CSUM_F) \
-T(ts_tso_ol3ol4csum, 1, 1, 0, 0, 1, 0, 8, \
+T(ts_tso_ol3ol4csum, 0, 1, 1, 0, 0, 1, 0, 8, \
TSP_F | TSO_F | OL3OL4CSUM_F) \
-T(ts_tso_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 8, \
+T(ts_tso_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 0, 1, 1, 8, \
TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso_vlan, 1, 1, 0, 1, 0, 0, 8, \
+T(ts_tso_vlan, 0, 1, 1, 0, 1, 0, 0, 8, \
TSP_F | TSO_F | VLAN_F) \
-T(ts_tso_vlan_l3l4csum, 1, 1, 0, 1, 0, 1, 8, \
+T(ts_tso_vlan_l3l4csum, 0, 1, 1, 0, 1, 0, 1, 8, \
TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
-T(ts_tso_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 0, 8, \
+T(ts_tso_vlan_ol3ol4csum, 0, 1, 1, 0, 1, 1, 0, 8, \
TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 1, 8, \
+T(ts_tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 1, 8, \
TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso_noff, 1, 1, 1, 0, 0, 0, 8, \
+T(ts_tso_noff, 0, 1, 1, 1, 0, 0, 0, 8, \
TSP_F | TSO_F | NOFF_F) \
-T(ts_tso_noff_l3l4csum, 1, 1, 1, 0, 0, 1, 8, \
+T(ts_tso_noff_l3l4csum, 0, 1, 1, 1, 0, 0, 1, 8, \
TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
-T(ts_tso_noff_ol3ol4csum, 1, 1, 1, 0, 1, 0, 8, \
+T(ts_tso_noff_ol3ol4csum, 0, 1, 1, 1, 0, 1, 0, 8, \
TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
-T(ts_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 8, \
+T(ts_tso_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 0, 1, 1, 8, \
TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso_noff_vlan, 1, 1, 1, 1, 0, 0, 8, \
+T(ts_tso_noff_vlan, 0, 1, 1, 1, 1, 0, 0, 8, \
TSP_F | TSO_F | NOFF_F | VLAN_F) \
-T(ts_tso_noff_vlan_l3l4csum, 1, 1, 1, 1, 0, 1, 8, \
+T(ts_tso_noff_vlan_l3l4csum, 0, 1, 1, 1, 1, 0, 1, 8, \
TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(ts_tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 0, 8, \
+T(ts_tso_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 1, 0, 8, \
TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 8, \
- TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
-
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 1, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec, 1, 0, 0, 0, 0, 0, 0, 4, \
+ T_SEC_F) \
+T(sec_l3l4csum, 1, 0, 0, 0, 0, 0, 1, 4, \
+ T_SEC_F | L3L4CSUM_F) \
+T(sec_ol3ol4csum, 1, 0, 0, 0, 0, 1, 0, 4, \
+ T_SEC_F | OL3OL4CSUM_F) \
+T(sec_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 0, 1, 1, 4, \
+ T_SEC_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_vlan, 1, 0, 0, 0, 1, 0, 0, 6, \
+ T_SEC_F | VLAN_F) \
+T(sec_vlan_l3l4csum, 1, 0, 0, 0, 1, 0, 1, 6, \
+ T_SEC_F | VLAN_F | L3L4CSUM_F) \
+T(sec_vlan_ol3ol4csum, 1, 0, 0, 0, 1, 1, 0, 6, \
+ T_SEC_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 1, 6, \
+ T_SEC_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_noff, 1, 0, 0, 1, 0, 0, 0, 4, \
+ T_SEC_F | NOFF_F) \
+T(sec_noff_l3l4csum, 1, 0, 0, 1, 0, 0, 1, 4, \
+ T_SEC_F | NOFF_F | L3L4CSUM_F) \
+T(sec_noff_ol3ol4csum, 1, 0, 0, 1, 0, 1, 0, 4, \
+ T_SEC_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_noff_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 0, 1, 1, 4, \
+ T_SEC_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_noff_vlan, 1, 0, 0, 1, 1, 0, 0, 6, \
+ T_SEC_F | NOFF_F | VLAN_F) \
+T(sec_noff_vlan_l3l4csum, 1, 0, 0, 1, 1, 0, 1, 6, \
+ T_SEC_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_noff_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 1, 0, 6, \
+ T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 1, 6, \
+ T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso, 1, 0, 1, 0, 0, 0, 0, 6, \
+ T_SEC_F | TSO_F) \
+T(sec_tso_l3l4csum, 1, 0, 1, 0, 0, 0, 1, 6, \
+ T_SEC_F | TSO_F | L3L4CSUM_F) \
+T(sec_tso_ol3ol4csum, 1, 0, 1, 0, 0, 1, 0, 6, \
+ T_SEC_F | TSO_F | OL3OL4CSUM_F) \
+T(sec_tso_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 0, 1, 1, 6, \
+ T_SEC_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_vlan, 1, 0, 1, 0, 1, 0, 0, 6, \
+ T_SEC_F | TSO_F | VLAN_F) \
+T(sec_tso_vlan_l3l4csum, 1, 0, 1, 0, 1, 0, 1, 6, \
+ T_SEC_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+T(sec_tso_vlan_ol3ol4csum, 1, 0, 1, 0, 1, 1, 0, 6, \
+ T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_tso_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 1, 6, \
+ T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_noff, 1, 0, 1, 1, 0, 0, 0, 6, \
+ T_SEC_F | TSO_F | NOFF_F) \
+T(sec_tso_noff_l3l4csum, 1, 0, 1, 1, 0, 0, 1, 6, \
+ T_SEC_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+T(sec_tso_noff_ol3ol4csum, 1, 0, 1, 1, 0, 1, 0, 6, \
+ T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_tso_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 0, 1, 1, 6, \
+ T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_noff_vlan, 1, 0, 1, 1, 1, 0, 0, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F) \
+T(sec_tso_noff_vlan_l3l4csum, 1, 0, 1, 1, 1, 0, 1, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_tso_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 1, 0, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 1, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts, 1, 1, 0, 0, 0, 0, 0, 8, \
+ T_SEC_F | TSP_F) \
+T(sec_ts_l3l4csum, 1, 1, 0, 0, 0, 0, 1, 8, \
+ T_SEC_F | TSP_F | L3L4CSUM_F) \
+T(sec_ts_ol3ol4csum, 1, 1, 0, 0, 0, 1, 0, 8, \
+ T_SEC_F | TSP_F | OL3OL4CSUM_F) \
+T(sec_ts_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 0, 1, 1, 8, \
+ T_SEC_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_vlan, 1, 1, 0, 0, 1, 0, 0, 8, \
+ T_SEC_F | TSP_F | VLAN_F) \
+T(sec_ts_vlan_l3l4csum, 1, 1, 0, 0, 1, 0, 1, 8, \
+ T_SEC_F | TSP_F | VLAN_F | L3L4CSUM_F) \
+T(sec_ts_vlan_ol3ol4csum, 1, 1, 0, 0, 1, 1, 0, 8, \
+ T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_ts_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 1, 8, \
+ T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_noff, 1, 1, 0, 1, 0, 0, 0, 8, \
+ T_SEC_F | TSP_F | NOFF_F) \
+T(sec_ts_noff_l3l4csum, 1, 1, 0, 1, 0, 0, 1, 8, \
+ T_SEC_F | TSP_F | NOFF_F | L3L4CSUM_F) \
+T(sec_ts_noff_ol3ol4csum, 1, 1, 0, 1, 0, 1, 0, 8, \
+ T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_ts_noff_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 0, 1, 1, 8, \
+ T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_noff_vlan, 1, 1, 0, 1, 1, 0, 0, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F) \
+T(sec_ts_noff_vlan_l3l4csum, 1, 1, 0, 1, 1, 0, 1, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_ts_noff_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 1, 0, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 1, 1, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts_tso, 1, 1, 1, 0, 0, 0, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F) \
+T(sec_ts_tso_l3l4csum, 1, 1, 1, 0, 0, 0, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | L3L4CSUM_F) \
+T(sec_ts_tso_ol3ol4csum, 1, 1, 1, 0, 0, 1, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F) \
+T(sec_ts_tso_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 0, 1, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_tso_vlan, 1, 1, 1, 0, 1, 0, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F) \
+T(sec_ts_tso_vlan_l3l4csum, 1, 1, 1, 0, 1, 0, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+T(sec_ts_tso_vlan_ol3ol4csum, 1, 1, 1, 0, 1, 1, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_ts_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_tso_noff, 1, 1, 1, 1, 0, 0, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F) \
+T(sec_ts_tso_noff_l3l4csum, 1, 1, 1, 1, 0, 0, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+T(sec_ts_tso_noff_ol3ol4csum, 1, 1, 1, 1, 0, 1, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_ts_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 0, 1, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts_tso_noff_vlan, 1, 1, 1, 1, 1, 0, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F) \
+T(sec_ts_tso_noff_vlan_l3l4csum, 1, 1, 1, 1, 1, 0, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_ts_tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 1, 0, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)\
+T(sec_ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 1, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F)
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
\