#include <rte_vect.h>
+#include <rte_eventdev.h>
+
#define NIX_TX_OFFLOAD_NONE (0)
#define NIX_TX_OFFLOAD_L3_L4_CSUM_F BIT(0)
#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
#define NIX_TX_OFFLOAD_TSO_F BIT(4)
#define NIX_TX_OFFLOAD_TSTAMP_F BIT(5)
+#define NIX_TX_OFFLOAD_SECURITY_F BIT(6)
+#define NIX_TX_OFFLOAD_MAX (NIX_TX_OFFLOAD_SECURITY_F << 1)
/* Flags to control xmit_prepare function.
 * Defined from the end (backwards) to denote that these are
 * not used as offload flags to pick the transmit function.
*/
+#define NIX_TX_VWQE_F BIT(14)
#define NIX_TX_MULTI_SEG_F BIT(15)
#define NIX_TX_NEED_SEND_HDR_W1 \
} \
} while (0)
-#define LMT_OFF(lmt_addr, lmt_num, offset) \
- (void *)((lmt_addr) + ((lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
+/* Number-of-segments to number-of-dwords lookup macro; the dword count
+ * for each value of nb_segs is encoded as a 4-bit nibble.
+ */
+#define NIX_SEGDW_MAGIC 0x76654432210ULL
+
+#define NIX_NB_SEGS_TO_SEGDW(x) ((NIX_SEGDW_MAGIC >> ((x) << 2)) & 0xF)
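+
+/* Illustrative only (not part of the patch): nibble n of
+ * NIX_SEGDW_MAGIC holds the number of 16B LMT dwords consumed by the
+ * SG chain for n segments -- one 8B SG word per three segments plus
+ * one 8B IOVA per segment, rounded up to 16B. A hypothetical reference
+ * computation the table can be checked against:
+ */
+static __rte_always_inline uint8_t
+cn10k_nix_segdw_ref(uint8_t nb_segs)
+{
+	/* One 8B SG word per 3 segments + one 8B IOVA per segment */
+	uint8_t words = (nb_segs + 2) / 3 + nb_segs;
+
+	/* Round up to 16B dwords; matches NIX_NB_SEGS_TO_SEGDW(nb_segs)
+	 * for all nb_segs values encoded in the constant (1..10)
+	 */
+	return (words + 1) / 2;
+}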
/* Function to determine number of Tx subdescs required when the ext
 * sub desc is enabled.
 */
static __rte_always_inline int
cn10k_nix_tx_ext_subs(const uint16_t flags)
{
- return (flags & NIX_TX_OFFLOAD_TSTAMP_F)
- ? 2
- : ((flags &
- (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F))
- ? 1
- : 0);
+ return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ?
+ 2 :
+ ((flags &
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ?
+ 1 :
+ 0);
+}
+
+static __rte_always_inline uint8_t
+cn10k_nix_tx_dwords(const uint16_t flags, const uint8_t segdw)
+{
+ if (!(flags & NIX_TX_MULTI_SEG_F))
+ return cn10k_nix_tx_ext_subs(flags) + 2;
+
+	/* Everything is already accounted for in segdw */
+ return segdw;
}
static __rte_always_inline uint8_t
cn10k_nix_tx_dwords_per_line(const uint16_t flags)
{
- return (flags & NIX_TX_NEED_EXT_HDR) ? 6 : 8;
+ return (flags & NIX_TX_NEED_EXT_HDR) ?
+ ((flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 8 : 6) :
+ 8;
}
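+
+/* Illustrative only (not part of the patch): a 128B LMTLINE holds
+ * eight 16B dwords, so the single-seg packets-per-line count follows
+ * directly from the helpers above. A hypothetical sketch:
+ */
+static __rte_always_inline uint8_t
+cn10k_nix_pkts_per_lmt_line(const uint16_t flags)
+{
+	/* Single-seg descriptor: SEND_HDR + optional EXT/MEM + SG */
+	const uint8_t dw_per_pkt = cn10k_nix_tx_ext_subs(flags) + 2;
+
+	/* 8 / 2 = 4 pkts; 6 / 3 = 2 pkts (EXT); 8 / 4 = 2 (EXT+TSTAMP) */
+	return cn10k_nix_tx_dwords_per_line(flags) / dw_per_pkt;
+}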
static __rte_always_inline uint64_t
return data;
}
+static __rte_always_inline uint8_t
+cn10k_nix_tx_dwords_per_line_seg(const uint16_t flags)
+{
+ return ((flags & NIX_TX_NEED_EXT_HDR) ?
+ (flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 8 : 6 :
+ 4);
+}
+
static __rte_always_inline uint64_t
cn10k_nix_tx_steor_vec_data(const uint16_t flags)
{
return data;
}
+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data(void)
+{
+ /* We have two CPT instructions per LMTLine */
+ const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+ uint64_t data;
+
+ /* This will be moved to addr area */
+ data = dw_m1 << 16;
+ data |= dw_m1 << 19;
+ data |= dw_m1 << 22;
+ data |= dw_m1 << 25;
+ data |= dw_m1 << 28;
+ data |= dw_m1 << 31;
+ data |= dw_m1 << 34;
+ data |= dw_m1 << 37;
+ data |= dw_m1 << 40;
+ data |= dw_m1 << 43;
+ data |= dw_m1 << 46;
+ data |= dw_m1 << 49;
+ data |= dw_m1 << 52;
+ data |= dw_m1 << 55;
+ data |= dw_m1 << 58;
+ data |= dw_m1 << 61;
+
+ return data;
+}
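+
+/* Equivalent loop form of the unrolled constant above (a readability
+ * sketch; the unrolled version folds to a compile-time immediate):
+ */
+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data_ref(void)
+{
+	const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+	uint64_t data = 0;
+	uint8_t i;
+
+	/* One 3-bit (dwords - 1) field per LMT line, for lines 0..15,
+	 * starting at bit 16
+	 */
+	for (i = 0; i < 16; i++)
+		data |= dw_m1 << (16 + i * 3);
+
+	return data;
+}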
+
static __rte_always_inline void
-cn10k_nix_tx_skeleton(const struct cn10k_eth_txq *txq, uint64_t *cmd,
- const uint16_t flags)
+cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
+ const uint16_t flags, const uint16_t static_sz)
{
- /* Send hdr */
- cmd[0] = txq->send_hdr_w0;
+ if (static_sz)
+ cmd[0] = txq->send_hdr_w0;
+ else
+ cmd[0] = (txq->send_hdr_w0 & 0xFFFFF00000000000) |
+ ((uint64_t)(cn10k_nix_tx_ext_subs(flags) + 1) << 40);
cmd[1] = 0;
- cmd += 2;
- /* Send ext if present */
if (flags & NIX_TX_NEED_EXT_HDR) {
- *(__uint128_t *)cmd = *(const __uint128_t *)txq->cmd;
- cmd += 2;
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F)
+ cmd[2] = (NIX_SUBDC_EXT << 60) | BIT_ULL(15);
+ else
+ cmd[2] = NIX_SUBDC_EXT << 60;
+ cmd[3] = 0;
+ cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
+ } else {
+ cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
}
+}
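+
+/* Note (illustrative; field offsets assumed from the NIX descriptor
+ * layout): in the !static_sz (VWQE) case above, the mask keeps
+ * SEND_HDR_W0 bits [63:44] (the SQ field) and the value shifted to
+ * bit 40 is sizem1. A single-seg command spans
+ * cn10k_nix_tx_ext_subs(flags) + 2 16B dwords, hence sizem1 equals
+ * ext_subs + 1, e.g. 2 for HDR + EXT + SG.
+ */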
- /* Send sg */
- cmd[0] = txq->sg_w0;
- cmd[1] = 0;
+static __rte_always_inline void
+cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
+ uint8_t loff, uint8_t shft)
+{
+ uint64_t data;
+ uintptr_t pa;
+
+ /* Check if there is any CPT instruction to submit */
+ if (!lnum && !loff)
+ return;
+
+ data = cn10k_cpt_tx_steor_data();
+ /* Update lmtline use for partial end line */
+ if (loff) {
+ data &= ~(0x7ULL << shft);
+		/* Update it to half full, i.e. 64B */
+ data |= (0x3UL << shft);
+ }
+
+ pa = io_addr | ((data >> 16) & 0x7) << 4;
+ data &= ~(0x7ULL << 16);
+ /* Update lines - 1 that contain valid data */
+ data |= ((uint64_t)(lnum + loff - 1)) << 12;
+ data |= lmt_id;
+
+ /* STEOR */
+ roc_lmt_submit_steorl(data, pa);
}
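+
+/* Worked example (illustrative): lnum = 2 full lines plus loff = 1
+ * pending half line, so shft = 16 + 2 * 3 = 22. The 3-bit field at
+ * bit 22 is rewritten to 0x3 (4 dwords - 1, one 64B CPT instruction),
+ * line 0's size moves into pa bits [6:4], and lnum + loff - 1 = 2,
+ * i.e. "3 lines valid", lands at bit 12 ahead of the STEORL.
+ */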
+#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline void
+cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
+ uintptr_t *nixtx_addr, uintptr_t lbase, uint8_t *lnum,
+ uint8_t *loff, uint8_t *shft, uint64_t sa_base,
+ const uint16_t flags)
+{
+ struct cn10k_sec_sess_priv sess_priv;
+ uint32_t pkt_len, dlen_adj, rlen;
+ uint64x2_t cmd01, cmd23;
+ uintptr_t dptr, nixtx;
+ uint64_t ucode_cmd[4];
+ uint64_t *laddr;
+ uint8_t l2_len;
+ uint16_t tag;
+ uint64_t sa;
+
+ sess_priv.u64 = *rte_security_dynfield(m);
+
+ if (flags & NIX_TX_NEED_SEND_HDR_W1)
+ l2_len = vgetq_lane_u8(*cmd0, 8);
+ else
+ l2_len = m->l2_len;
+
+ /* Retrieve DPTR */
+ dptr = vgetq_lane_u64(*cmd1, 1);
+ pkt_len = vgetq_lane_u16(*cmd0, 0);
+
+ /* Calculate dlen adj */
+ dlen_adj = pkt_len - l2_len;
+ rlen = (dlen_adj + sess_priv.roundup_len) +
+ (sess_priv.roundup_byte - 1);
+ rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+ rlen += sess_priv.partial_len;
+ dlen_adj = rlen - dlen_adj;
+
+ /* Update send descriptors. Security is single segment only */
+ *cmd0 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd0, 0);
+ *cmd1 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd1, 0);
+
+ /* Get area where NIX descriptor needs to be stored */
+ nixtx = dptr + pkt_len + dlen_adj;
+ nixtx += BIT_ULL(7);
+ nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+
+ /* Return nixtx addr */
+ *nixtx_addr = (nixtx + 16);
+
+ /* DLEN passed is excluding L2HDR */
+ pkt_len -= l2_len;
+ tag = sa_base & 0xFFFFUL;
+ sa_base &= ~0xFFFFUL;
+ sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[0] =
+ (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | pkt_len);
+
+ /* CPT Word 0 and Word 1 */
+ cmd01 = vdupq_n_u64((nixtx + 16) | (cn10k_nix_tx_ext_subs(flags) + 1));
+ /* CPT_RES_S is 16B above NIXTX */
+ cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+ /* CPT word 2 and 3 */
+ cmd23 = vdupq_n_u64(0);
+ cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) | tag |
+ CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+ cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+ dptr += l2_len;
+
+ if (sess_priv.mode == ROC_IE_SA_MODE_TUNNEL) {
+ if (sess_priv.outer_ip_ver == ROC_IE_SA_IP_VERSION_4)
+ *((uint16_t *)(dptr - 2)) =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ else
+ *((uint16_t *)(dptr - 2)) =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ }
+
+ ucode_cmd[1] = dptr;
+ ucode_cmd[2] = dptr;
+
+ /* Move to our line */
+ laddr = LMT_OFF(lbase, *lnum, *loff ? 64 : 0);
+
+ /* Write CPT instruction to lmt line */
+ vst1q_u64(laddr, cmd01);
+ vst1q_u64((laddr + 2), cmd23);
+
+ *(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+ *(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+ /* Move to next line for every other CPT inst */
+ *loff = !(*loff);
+ *lnum = *lnum + (*loff ? 0 : 1);
+ *shft = *shft + (*loff ? 0 : 3);
+}
+
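+/* Worked instance of the round-up above (illustrative numbers; field
+ * semantics assumed from the session layout): with dlen_adj = 100,
+ * roundup_len = 2, roundup_byte = 16 and partial_len = 40:
+ *   rlen     = ((100 + 2 + 15) & ~15) + 40 = 112 + 40 = 152
+ *   dlen_adj = 152 - 100 = 52 bytes of growth past the plain packet,
+ * which is why the NIXTX descriptor area is carved out after
+ * dptr + pkt_len + dlen_adj, rounded up to a 128B boundary.
+ */
+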
+static __rte_always_inline void
+cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+ uintptr_t lbase, uint8_t *lnum, uint8_t *loff, uint8_t *shft,
+ uint64_t sa_base, const uint16_t flags)
+{
+ struct cn10k_sec_sess_priv sess_priv;
+ uint32_t pkt_len, dlen_adj, rlen;
+ struct nix_send_hdr_s *send_hdr;
+ uint64x2_t cmd01, cmd23;
+ union nix_send_sg_s *sg;
+ uintptr_t dptr, nixtx;
+ uint64_t ucode_cmd[4];
+ uint64_t *laddr;
+ uint8_t l2_len;
+ uint16_t tag;
+ uint64_t sa;
+
+	/* Retrieve the security session private data */
+ sess_priv.u64 = *rte_security_dynfield(m);
+ send_hdr = (struct nix_send_hdr_s *)cmd;
+ if (flags & NIX_TX_NEED_EXT_HDR)
+ sg = (union nix_send_sg_s *)&cmd[4];
+ else
+ sg = (union nix_send_sg_s *)&cmd[2];
+
+ if (flags & NIX_TX_NEED_SEND_HDR_W1)
+ l2_len = cmd[1] & 0xFF;
+ else
+ l2_len = m->l2_len;
+
+ /* Retrieve DPTR */
+ dptr = *(uint64_t *)(sg + 1);
+ pkt_len = send_hdr->w0.total;
+
+ /* Calculate dlen adj */
+ dlen_adj = pkt_len - l2_len;
+ rlen = (dlen_adj + sess_priv.roundup_len) +
+ (sess_priv.roundup_byte - 1);
+ rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+ rlen += sess_priv.partial_len;
+ dlen_adj = rlen - dlen_adj;
+
+ /* Update send descriptors. Security is single segment only */
+ send_hdr->w0.total = pkt_len + dlen_adj;
+ sg->seg1_size = pkt_len + dlen_adj;
+
+ /* Get area where NIX descriptor needs to be stored */
+ nixtx = dptr + pkt_len + dlen_adj;
+ nixtx += BIT_ULL(7);
+ nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+
+ /* Return nixtx addr */
+ *nixtx_addr = (nixtx + 16);
+
+ /* DLEN passed is excluding L2HDR */
+ pkt_len -= l2_len;
+ tag = sa_base & 0xFFFFUL;
+ sa_base &= ~0xFFFFUL;
+ sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[0] =
+ (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | pkt_len);
+
+ /* CPT Word 0 and Word 1. Assume no multi-seg support */
+ cmd01 = vdupq_n_u64((nixtx + 16) | (cn10k_nix_tx_ext_subs(flags) + 1));
+ /* CPT_RES_S is 16B above NIXTX */
+ cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+ /* CPT word 2 and 3 */
+ cmd23 = vdupq_n_u64(0);
+ cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) | tag |
+ CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+ cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+ dptr += l2_len;
+
+ if (sess_priv.mode == ROC_IE_SA_MODE_TUNNEL) {
+ if (sess_priv.outer_ip_ver == ROC_IE_SA_IP_VERSION_4)
+ *((uint16_t *)(dptr - 2)) =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ else
+ *((uint16_t *)(dptr - 2)) =
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ }
+ ucode_cmd[1] = dptr;
+ ucode_cmd[2] = dptr;
+
+ /* Move to our line */
+ laddr = LMT_OFF(lbase, *lnum, *loff ? 64 : 0);
+
+ /* Write CPT instruction to lmt line */
+ vst1q_u64(laddr, cmd01);
+ vst1q_u64((laddr + 2), cmd23);
+
+ *(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+ *(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+ /* Move to next line for every other CPT inst */
+ *loff = !(*loff);
+ *lnum = *lnum + (*loff ? 0 : 1);
+ *shft = *shft + (*loff ? 0 : 3);
+}
+
+#else
+
+static __rte_always_inline void
+cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+ uintptr_t lbase, uint8_t *lnum, uint8_t *loff, uint8_t *shft,
+ uint64_t sa_base, const uint16_t flags)
+{
+ RTE_SET_USED(m);
+ RTE_SET_USED(cmd);
+ RTE_SET_USED(nixtx_addr);
+ RTE_SET_USED(lbase);
+ RTE_SET_USED(lnum);
+ RTE_SET_USED(loff);
+ RTE_SET_USED(shft);
+ RTE_SET_USED(sa_base);
+ RTE_SET_USED(flags);
+}
+#endif
+
static __rte_always_inline void
cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
{
uint64_t mask, ol_flags = m->ol_flags;
- if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
uint16_t *iplen, *oiplen, *oudplen;
uint16_t lso_sb, paylen;
- mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
+ mask = -!!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6));
lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
m->l2_len + m->l3_len + m->l4_len;
/* Get iplen position assuming no tunnel hdr */
iplen = (uint16_t *)(mdata + m->l2_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
oiplen = (uint16_t *)(mdata + m->outer_l2_len +
(2 << !!(ol_flags &
- PKT_TX_OUTER_IPV6)));
+ RTE_MBUF_F_TX_OUTER_IPV6)));
*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
paylen);
/* Update iplen position to inner ip hdr */
iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
m->l4_len +
- (2 << !!(ol_flags & PKT_TX_IPV6)));
+ (2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
}
*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
}
static __rte_always_inline void
-cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, uintptr_t lmt_addr,
- const uint16_t flags, const uint64_t lso_tun_fmt)
+cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
+ const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag,
+ uint64_t mark_fmt)
{
+ uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
uint64_t ol_flags = 0, mask;
union nix_send_hdr_w1_u w1;
union nix_send_sg_s *sg;
+ uint16_t mark_form = 0;
send_hdr = (struct nix_send_hdr_s *)cmd;
if (flags & NIX_TX_NEED_EXT_HDR) {
sg = (union nix_send_sg_s *)(cmd + 4);
/* Clear previous markings */
send_hdr_ext->w0.lso = 0;
+ send_hdr_ext->w0.mark_en = 0;
send_hdr_ext->w1.u = 0;
+ ol_flags = m->ol_flags;
} else {
sg = (union nix_send_sg_s *)(cmd + 2);
}
- if (flags & NIX_TX_NEED_SEND_HDR_W1) {
+ if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
ol_flags = m->ol_flags;
w1.u = 0;
}
- if (!(flags & NIX_TX_MULTI_SEG_F)) {
+ if (!(flags & NIX_TX_MULTI_SEG_F))
send_hdr->w0.total = m->data_len;
- send_hdr->w0.aura =
- roc_npa_aura_handle_to_aura(m->pool->pool_id);
- }
+ else
+ send_hdr->w0.total = m->pkt_len;
+ send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
/*
* L3type: 2 => IPV4
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t ol3type =
- ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L3 */
w1.ol3type = ol3type;
w1.ol4type = csum + (csum << 1);
/* Inner L3 */
- w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2);
+ w1.il3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2);
w1.il3ptr = w1.ol4ptr + m->l2_len;
w1.il4ptr = w1.il3ptr + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.il3type = w1.il3type + !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
/* In case of no tunnel header use only
* shift IL3/IL4 fields a bit to use
((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
- const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t outer_l2_len = m->outer_l2_len;
/* Outer L3 */
w1.ol3ptr = outer_l2_len;
w1.ol4ptr = outer_l2_len + m->outer_l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L4 */
w1.ol4type = csum + (csum << 1);
w1.ol3ptr = l2_len;
w1.ol4ptr = l2_len + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
- w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
- ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
- !!(ol_flags & PKT_TX_IP_CKSUM);
+ w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
- w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ w1.ol4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
- send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
+ const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+ const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+ RTE_MBUF_F_TX_IPV6));
+
+ send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
- send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
+ send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_QINQ);
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+ /* Fill for VLAN marking only when VLAN insertion enabled */
+ mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+ (send_hdr_ext->w1.vlan1_ins_ena ||
+ send_hdr_ext->w1.vlan0_ins_ena));
+
+ /* Mask requested flags with packet data information */
+ mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+ mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
+ mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+ mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+ markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+ send_hdr_ext->w0.mark_en = !!mark_off;
+ send_hdr_ext->w0.markform = mark_form & 0x7F;
+ send_hdr_ext->w0.markptr = markptr;
}
- if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
+ if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uint16_t lso_sb;
uint64_t mask;
send_hdr_ext->w0.lso = 1;
send_hdr_ext->w0.lso_mps = m->tso_segsz;
send_hdr_ext->w0.lso_format =
- NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
- (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
- ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
- shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
- shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
send_hdr->w1.u = w1.u;
if (!(flags & NIX_TX_MULTI_SEG_F)) {
- sg->seg1_size = m->data_len;
+ sg->seg1_size = send_hdr->w0.total;
*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
}
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+ } else {
+ sg->seg1_size = m->data_len;
+ *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+
+ /* NOFF is handled later for multi-seg */
}
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+ *sec = !!(ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
+}
+
+static __rte_always_inline void
+cn10k_nix_xmit_mv_lmt_base(uintptr_t lmt_addr, uint64_t *cmd,
+ const uint16_t flags)
+{
+ struct nix_send_ext_s *send_hdr_ext;
+ union nix_send_sg_s *sg;
+
	/* With minimal offloads, 'cmd' being local could be optimized out to
	 * registers. In other cases, 'cmd' will be on the stack. The intent is
	 * that 'cmd' stores content from txq->cmd which is copied only once.
	 */
- *((struct nix_send_hdr_s *)lmt_addr) = *send_hdr;
+ *((struct nix_send_hdr_s *)lmt_addr) = *(struct nix_send_hdr_s *)cmd;
lmt_addr += 16;
if (flags & NIX_TX_NEED_EXT_HDR) {
+ send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
*((struct nix_send_ext_s *)lmt_addr) = *send_hdr_ext;
lmt_addr += 16;
+
+ sg = (union nix_send_sg_s *)(cmd + 4);
+ } else {
+ sg = (union nix_send_sg_s *)(cmd + 2);
}
/* In case of multi-seg, sg template is stored here */
*((union nix_send_sg_s *)lmt_addr) = *sg;
}
static __rte_always_inline void
-cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
+cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr,
const uint64_t ol_flags, const uint16_t no_segdw,
const uint16_t flags)
{
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
- const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
- struct nix_send_ext_s *send_hdr_ext =
- (struct nix_send_ext_s *)lmt_addr + 16;
+ const uint8_t is_ol_tstamp =
+ !(ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST);
uint64_t *lmt = (uint64_t *)lmt_addr;
uint16_t off = (no_segdw - 1) << 1;
struct nix_send_mem_s *send_mem;
send_mem = (struct nix_send_mem_s *)(lmt + off);
- send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
- send_hdr_ext->w0.tstmp = 1;
- if (flags & NIX_TX_MULTI_SEG_F) {
- /* Retrieving the default desc values */
- lmt[off] = cmd[2];
-
- /* Using compiler barier to avoid voilation of C
- * aliasing rules.
- */
- rte_compiler_barrier();
- }
-
		/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx
		 * tstamp should not be recorded, hence changing the alg type to
- * NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * NIX_SENDMEMALG_SUB and also changing send mem addr field to
+ * next 8 bytes as it corrupts the actual Tx tstamp registered
* address.
*/
send_mem->w0.subdc = NIX_SUBDC_MEM;
- send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
+ send_mem->w0.alg =
+ NIX_SENDMEMALG_SETTSTMP + (is_ol_tstamp << 3);
send_mem->addr =
- (rte_iova_t)(((uint64_t *)cmd[3]) + is_ol_tstamp);
+ (rte_iova_t)(((uint64_t *)txq->ts_mem) + is_ol_tstamp);
}
}
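+
+/* Hypothetical helper spelling out the selection done above (enum
+ * values NIX_SENDMEMALG_SETTSTMP = 0x1 and NIX_SENDMEMALG_SUB = 0x9
+ * assumed from the NIX definitions):
+ */
+static __rte_always_inline void
+cn10k_nix_tstamp_mem_fill(struct nix_send_mem_s *send_mem,
+			  uint64_t ts_mem, uint8_t no_tstamp)
+{
+	send_mem->w0.subdc = NIX_SUBDC_MEM;
+	/* SETTSTMP (0x1) + (1 << 3) = SUB (0x9), a harmless subtract
+	 * when the packet did not request a timestamp
+	 */
+	send_mem->w0.alg = NIX_SENDMEMALG_SETTSTMP + (no_tstamp << 3);
+	/* Aim at a scratch word 8B past ts_mem in the SUB case */
+	send_mem->addr = (rte_iova_t)((uint64_t *)ts_mem + no_tstamp);
+}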
uint8_t off, i;
send_hdr = (struct nix_send_hdr_s *)cmd;
- send_hdr->w0.total = m->pkt_len;
- send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
if (flags & NIX_TX_NEED_EXT_HDR)
off = 2;
off = 0;
sg = (union nix_send_sg_s *)&cmd[2 + off];
- /* Clear sg->u header before use */
- sg->u &= 0xFC00000000000000;
+
+ /* Start from second segment, first segment is already there */
+ i = 1;
sg_u = sg->u;
- slist = &cmd[3 + off];
+ nb_segs = m->nb_segs - 1;
+ m_next = m->next;
+ slist = &cmd[3 + off + 1];
+
+ /* Set invert df if buffer is not to be freed by H/W */
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+ sg_u |= (cnxk_nix_prefree_seg(m) << 55);
- i = 0;
- nb_segs = m->nb_segs;
+ /* Mark mempool object as "put" since it is freed by NIX */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(sg_u & (1ULL << 55)))
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+ rte_io_wmb();
+#endif
+ m = m_next;
+ if (!m)
+ goto done;
/* Fill mbuf segments */
do {
*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
if (!(sg_u & (1ULL << (i + 55))))
- __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
#endif
slist++;
i++;
m = m_next;
} while (nb_segs);
+done:
sg->u = sg_u;
sg->segs = i;
segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
}
static __rte_always_inline uint16_t
-cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
- uint64_t *cmd, const uint16_t flags)
+cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
+ uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
struct cn10k_eth_txq *txq = tx_queue;
const rte_iova_t io_addr = txq->io_addr;
- uintptr_t pa, lmt_addr = txq->lmt_base;
+ uint8_t lnum, c_lnum, c_shft, c_loff;
+ uintptr_t pa, lbase = txq->lmt_base;
uint16_t lmt_id, burst, left, i;
+ uintptr_t c_lbase = lbase;
+ uint64_t mark_fmt = 0;
+ uint8_t mark_flag = 0;
+ rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
+ uint16_t c_lmt_id;
+ uint64_t sa_base;
+ uintptr_t laddr;
uint64_t data;
+ bool sec;
- NIX_XMIT_FC_OR_RETURN(txq, pkts);
-
+ if (!(flags & NIX_TX_VWQE_F)) {
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+ }
/* Get cmd skeleton */
- cn10k_nix_tx_skeleton(txq, cmd, flags);
-
- /* Reduce the cached count */
- txq->fc_cache_pkts -= pkts;
+ cn10k_nix_tx_skeleton(txq, cmd, flags, !(flags & NIX_TX_VWQE_F));
if (flags & NIX_TX_OFFLOAD_TSO_F)
lso_tun_fmt = txq->lso_tun_fmt;
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ mark_fmt = txq->mark_fmt;
+ mark_flag = txq->mark_flag;
+ }
+
/* Get LMT base address and LMT ID as lcore id */
- ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+ c_io_addr = txq->cpt_io_addr;
+ sa_base = txq->sa_base;
+ }
+
left = pkts;
again:
burst = left > 32 ? 32 : left;
+
+ lnum = 0;
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ c_lnum = 0;
+ c_loff = 0;
+ c_shft = 16;
+ }
+
for (i = 0; i < burst; i++) {
		/* Perform header writes for TSO, barrier at
		 * lmt steorl will suffice.
		 */
if (flags & NIX_TX_OFFLOAD_TSO_F)
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
- cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
- lso_tun_fmt);
- cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
- tx_pkts[i]->ol_flags, 4, flags);
- lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
+ cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+ &sec, mark_flag, mark_fmt);
+
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+
+ /* Prepare CPT instruction and get nixtx addr */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ cn10k_nix_prep_sec(tx_pkts[i], cmd, &laddr, c_lbase,
+ &c_lnum, &c_loff, &c_shft, sa_base,
+ flags);
+
+ /* Move NIX desc to LMT/NIXTX area */
+ cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+ cn10k_nix_xmit_prepare_tstamp(txq, laddr, tx_pkts[i]->ol_flags,
+ 4, flags);
+ if (!(flags & NIX_TX_OFFLOAD_SECURITY_F) || !sec)
+ lnum++;
+ }
+
+ if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35)))
+ ws[1] = roc_sso_hws_head_wait(ws[0]);
+
+ left -= burst;
+ tx_pkts += burst;
+
+ /* Submit CPT instructions if any */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ /* Reduce pkts to be sent to CPT */
+ burst -= ((c_lnum << 1) + c_loff);
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+ c_shft);
}
/* Trigger LMTST */
roc_lmt_submit_steorl(data, pa);
}
- left -= burst;
rte_io_wmb();
- if (left) {
- /* Start processing another burst */
- tx_pkts += burst;
- /* Reset lmt base addr */
- lmt_addr -= (1ULL << ROC_LMT_LINE_SIZE_LOG2);
- lmt_addr &= (~(BIT_ULL(ROC_LMT_BASE_PER_CORE_LOG2) - 1));
+ if (left)
goto again;
- }
return pkts;
}
static __rte_always_inline uint16_t
-cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
+ struct rte_mbuf **tx_pkts, uint16_t pkts,
+ uint64_t *cmd, const uint16_t flags)
{
struct cn10k_eth_txq *txq = tx_queue;
- uintptr_t pa0, pa1, lmt_addr = txq->lmt_base;
+ uintptr_t pa0, pa1, lbase = txq->lmt_base;
const rte_iova_t io_addr = txq->io_addr;
uint16_t segdw, lmt_id, burst, left, i;
+ uint8_t lnum, c_lnum, c_loff;
+ uintptr_t c_lbase = lbase;
+ uint64_t mark_fmt = 0;
+ uint8_t mark_flag = 0;
uint64_t data0, data1;
+ rte_iova_t c_io_addr;
uint64_t lso_tun_fmt;
+ uint8_t shft, c_shft;
__uint128_t data128;
- uint16_t shft;
-
- NIX_XMIT_FC_OR_RETURN(txq, pkts);
-
- cn10k_nix_tx_skeleton(txq, cmd, flags);
-
- /* Reduce the cached count */
- txq->fc_cache_pkts -= pkts;
+ uint16_t c_lmt_id;
+ uint64_t sa_base;
+ uintptr_t laddr;
+ bool sec;
+
+ if (!(flags & NIX_TX_VWQE_F)) {
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+ }
+ /* Get cmd skeleton */
+ cn10k_nix_tx_skeleton(txq, cmd, flags, !(flags & NIX_TX_VWQE_F));
if (flags & NIX_TX_OFFLOAD_TSO_F)
lso_tun_fmt = txq->lso_tun_fmt;
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ mark_fmt = txq->mark_fmt;
+ mark_flag = txq->mark_flag;
+ }
+
/* Get LMT base address and LMT ID as lcore id */
- ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+ c_io_addr = txq->cpt_io_addr;
+ sa_base = txq->sa_base;
+ }
+
left = pkts;
again:
burst = left > 32 ? 32 : left;
shft = 16;
data128 = 0;
+
+ lnum = 0;
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ c_lnum = 0;
+ c_loff = 0;
+ c_shft = 16;
+ }
+
for (i = 0; i < burst; i++) {
		/* Perform header writes for TSO, barrier at
		 * lmt steorl will suffice.
		 */
if (flags & NIX_TX_OFFLOAD_TSO_F)
cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
- cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
- lso_tun_fmt);
+ cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+ &sec, mark_flag, mark_fmt);
+
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+
+ /* Prepare CPT instruction and get nixtx addr */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ cn10k_nix_prep_sec(tx_pkts[i], cmd, &laddr, c_lbase,
+ &c_lnum, &c_loff, &c_shft, sa_base,
+ flags);
+
+ /* Move NIX desc to LMT/NIXTX area */
+ cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
/* Store sg list directly on lmt line */
- segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)lmt_addr,
+ segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)laddr,
flags);
- cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
- tx_pkts[i]->ol_flags, segdw,
- flags);
- lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
- data128 |= (((__uint128_t)(segdw - 1)) << shft);
- shft += 3;
+ cn10k_nix_xmit_prepare_tstamp(txq, laddr, tx_pkts[i]->ol_flags,
+ segdw, flags);
+ if (!(flags & NIX_TX_OFFLOAD_SECURITY_F) || !sec) {
+ lnum++;
+ data128 |= (((__uint128_t)(segdw - 1)) << shft);
+ shft += 3;
+ }
+ }
+
+ if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35)))
+ ws[1] = roc_sso_hws_head_wait(ws[0]);
+
+ left -= burst;
+ tx_pkts += burst;
+
+ /* Submit CPT instructions if any */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ /* Reduce pkts to be sent to CPT */
+ burst -= ((c_lnum << 1) + c_loff);
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+ c_shft);
}
data0 = (uint64_t)data128;
roc_lmt_submit_steorl(data0, pa0);
}
- left -= burst;
rte_io_wmb();
- if (left) {
- /* Start processing another burst */
- tx_pkts += burst;
- /* Reset lmt base addr */
- lmt_addr -= (1ULL << ROC_LMT_LINE_SIZE_LOG2);
- lmt_addr &= (~(BIT_ULL(ROC_LMT_BASE_PER_CORE_LOG2) - 1));
+ if (left)
goto again;
- }
return pkts;
}
#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline void
+cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
+ union nix_send_ext_w0_u *w0, uint64_t ol_flags,
+ const uint64_t flags, const uint64_t lso_tun_fmt)
+{
+ uint16_t lso_sb;
+ uint64_t mask;
+
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
+ return;
+
+ mask = -(!w1->il3type);
+ lso_sb = (mask & w1->ol4ptr) + (~mask & w1->il4ptr) + m->l4_len;
+
+ w0->u |= BIT(14);
+ w0->lso_sb = lso_sb;
+ w0->lso_mps = m->tso_segsz;
+ w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+ w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+
+ /* Handle tunnel tso */
+ if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
+ (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
+ const uint8_t is_udp_tun =
+ (CNXK_NIX_UDP_TUN_BITMASK >>
+ ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
+ 0x1;
+ uint8_t shift = is_udp_tun ? 32 : 0;
+
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
+
+ w1->il4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ w1->ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
+
+		/* Update format for UDP tunneled packet */
+ w0->lso_format = (lso_tun_fmt >> shift);
+ }
+}
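+
+/* The mask line above is a branchless select: mask = -(!cond) is
+ * all-ones when cond == 0, so plain TSO picks ol4ptr and tunneled TSO
+ * picks il4ptr without a branch. Minimal illustration (hypothetical
+ * helper):
+ */
+static __rte_always_inline uint16_t
+cn10k_nix_sel_l4ptr(uint16_t ol4ptr, uint16_t il4ptr, uint8_t il3type)
+{
+	const uint64_t mask = -(uint64_t)(!il3type);
+
+	/* il3type == 0: mask = ~0 -> ol4ptr; else mask = 0 -> il4ptr */
+	return (mask & ol4ptr) + (~mask & il4ptr);
+}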
+
+static __rte_always_inline void
+cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
+ union nix_send_hdr_w0_u *sh,
+ union nix_send_sg_s *sg, const uint32_t flags)
+{
+ struct rte_mbuf *m_next;
+ uint64_t *slist, sg_u;
+ uint16_t nb_segs;
+ int i = 1;
+
+ sh->total = m->pkt_len;
+ /* Clear sg->u header before use */
+ sg->u &= 0xFC00000000000000;
+ sg_u = sg->u;
+ slist = &cmd[0];
+
+ sg_u = sg_u | ((uint64_t)m->data_len);
+
+ nb_segs = m->nb_segs - 1;
+ m_next = m->next;
+
+ /* Set invert df if buffer is not to be freed by H/W */
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+ sg_u |= (cnxk_nix_prefree_seg(m) << 55);
+ /* Mark mempool object as "put" since it is freed by NIX */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(sg_u & (1ULL << 55)))
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+ rte_io_wmb();
+#endif
+
+ m = m_next;
+ /* Fill mbuf segments */
+ do {
+ m_next = m->next;
+ sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
+ *slist = rte_mbuf_data_iova(m);
+ /* Set invert df if buffer is not to be freed by H/W */
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+ sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55));
+ /* Mark mempool object as "put" since it is freed by NIX
+ */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(sg_u & (1ULL << (i + 55))))
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+ rte_io_wmb();
+#endif
+ slist++;
+ i++;
+ nb_segs--;
+ if (i > 2 && nb_segs) {
+ i = 0;
+ /* Next SG subdesc */
+ *(uint64_t *)slist = sg_u & 0xFC00000000000000;
+ sg->u = sg_u;
+ sg->segs = 3;
+ sg = (union nix_send_sg_s *)slist;
+ sg_u = sg->u;
+ slist++;
+ }
+ m = m_next;
+ } while (nb_segs);
+
+ sg->u = sg_u;
+ sg->segs = i;
+}
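+
+/* Layout sketch for the loop above (illustrative, 5-segment mbuf).
+ * SG0 and the first IOVA live in the cmd0/cmd1 descriptor pair; the
+ * 8B words emitted through slist are:
+ *   [iova1][iova2][SG1: sizes s3,s4, segs = 2][iova3][iova4]
+ * with SG0 carrying sizes s0..s2 and segs = 3; a fresh SG word is
+ * started in-line after every third segment.
+ */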
+
+static __rte_always_inline void
+cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
+ uint64x2_t *cmd1, const uint8_t segdw,
+ const uint32_t flags)
+{
+ union nix_send_hdr_w0_u sh;
+ union nix_send_sg_s sg;
+
+ if (m->nb_segs == 1) {
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ sg.u = vgetq_lane_u64(cmd1[0], 0);
+ sg.u |= (cnxk_nix_prefree_seg(m) << 55);
+ cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
+ }
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sg.u = vgetq_lane_u64(cmd1[0], 0);
+ if (!(sg.u & (1ULL << 55)))
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+ rte_io_wmb();
+#endif
+ return;
+ }
+
+ sh.u = vgetq_lane_u64(cmd0[0], 0);
+ sg.u = vgetq_lane_u64(cmd1[0], 0);
+
+ cn10k_nix_prepare_mseg_vec_list(m, cmd, &sh, &sg, flags);
+
+ sh.sizem1 = segdw - 1;
+ cmd0[0] = vsetq_lane_u64(sh.u, cmd0[0], 0);
+ cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
+}
+
#define NIX_DESCS_PER_LOOP 4
+
+static __rte_always_inline uint8_t
+cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0,
+ uint64x2_t *cmd1, uint64x2_t *cmd2,
+ uint64x2_t *cmd3, uint8_t *segdw,
+ uint64_t *lmt_addr, __uint128_t *data128,
+ uint8_t *shift, const uint16_t flags)
+{
+ uint8_t j, off, lmt_used;
+
+ if (!(flags & NIX_TX_NEED_EXT_HDR) &&
+ !(flags & NIX_TX_OFFLOAD_TSTAMP_F)) {
+		/* All 4 consecutive packets are single-segment (2 dwords each). */
+ if ((segdw[0] + segdw[1] + segdw[2] + segdw[3]) <= 8) {
+ for (j = 0; j < NIX_DESCS_PER_LOOP; j++)
+ cn10k_nix_prepare_mseg_vec(mbufs[j], NULL,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ vst1q_u64(lmt_addr, cmd0[0]);
+ vst1q_u64(lmt_addr + 2, cmd1[0]);
+ vst1q_u64(lmt_addr + 4, cmd0[1]);
+ vst1q_u64(lmt_addr + 6, cmd1[1]);
+ vst1q_u64(lmt_addr + 8, cmd0[2]);
+ vst1q_u64(lmt_addr + 10, cmd1[2]);
+ vst1q_u64(lmt_addr + 12, cmd0[3]);
+ vst1q_u64(lmt_addr + 14, cmd1[3]);
+
+ *data128 |= ((__uint128_t)7) << *shift;
+ *shift += 3;
+
+ return 1;
+ }
+ }
+
+ lmt_used = 0;
+ for (j = 0; j < NIX_DESCS_PER_LOOP;) {
+ /* Fit consecutive packets in same LMTLINE. */
+ if ((segdw[j] + segdw[j + 1]) <= 8) {
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ cn10k_nix_prepare_mseg_vec(mbufs[j], NULL,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ cn10k_nix_prepare_mseg_vec(mbufs[j + 1], NULL,
+ &cmd0[j + 1],
+ &cmd1[j + 1],
+ segdw[j + 1], flags);
+				/* TSTAMP takes 4 dwords each, no extra segs fit. */
+ vst1q_u64(lmt_addr, cmd0[j]);
+ vst1q_u64(lmt_addr + 2, cmd2[j]);
+ vst1q_u64(lmt_addr + 4, cmd1[j]);
+ vst1q_u64(lmt_addr + 6, cmd3[j]);
+
+ vst1q_u64(lmt_addr + 8, cmd0[j + 1]);
+ vst1q_u64(lmt_addr + 10, cmd2[j + 1]);
+ vst1q_u64(lmt_addr + 12, cmd1[j + 1]);
+ vst1q_u64(lmt_addr + 14, cmd3[j + 1]);
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
+				/* EXT header takes 3 dwords each, space for 2 segs. */
+ cn10k_nix_prepare_mseg_vec(mbufs[j],
+ lmt_addr + 6,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ vst1q_u64(lmt_addr, cmd0[j]);
+ vst1q_u64(lmt_addr + 2, cmd2[j]);
+ vst1q_u64(lmt_addr + 4, cmd1[j]);
+ off = segdw[j] - 3;
+ off <<= 1;
+ cn10k_nix_prepare_mseg_vec(mbufs[j + 1],
+ lmt_addr + 12 + off,
+ &cmd0[j + 1],
+ &cmd1[j + 1],
+ segdw[j + 1], flags);
+ vst1q_u64(lmt_addr + 6 + off, cmd0[j + 1]);
+ vst1q_u64(lmt_addr + 8 + off, cmd2[j + 1]);
+ vst1q_u64(lmt_addr + 10 + off, cmd1[j + 1]);
+ } else {
+ cn10k_nix_prepare_mseg_vec(mbufs[j],
+ lmt_addr + 4,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ vst1q_u64(lmt_addr, cmd0[j]);
+ vst1q_u64(lmt_addr + 2, cmd1[j]);
+ off = segdw[j] - 2;
+ off <<= 1;
+ cn10k_nix_prepare_mseg_vec(mbufs[j + 1],
+ lmt_addr + 8 + off,
+ &cmd0[j + 1],
+ &cmd1[j + 1],
+ segdw[j + 1], flags);
+ vst1q_u64(lmt_addr + 4 + off, cmd0[j + 1]);
+ vst1q_u64(lmt_addr + 6 + off, cmd1[j + 1]);
+ }
+ *data128 |= ((__uint128_t)(segdw[j] + segdw[j + 1]) - 1)
+ << *shift;
+ *shift += 3;
+ j += 2;
+ } else {
+ if ((flags & NIX_TX_NEED_EXT_HDR) &&
+ (flags & NIX_TX_OFFLOAD_TSTAMP_F)) {
+ cn10k_nix_prepare_mseg_vec(mbufs[j],
+ lmt_addr + 6,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ vst1q_u64(lmt_addr, cmd0[j]);
+ vst1q_u64(lmt_addr + 2, cmd2[j]);
+ vst1q_u64(lmt_addr + 4, cmd1[j]);
+ off = segdw[j] - 4;
+ off <<= 1;
+ vst1q_u64(lmt_addr + 6 + off, cmd3[j]);
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
+ cn10k_nix_prepare_mseg_vec(mbufs[j],
+ lmt_addr + 6,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ vst1q_u64(lmt_addr, cmd0[j]);
+ vst1q_u64(lmt_addr + 2, cmd2[j]);
+ vst1q_u64(lmt_addr + 4, cmd1[j]);
+ } else {
+ cn10k_nix_prepare_mseg_vec(mbufs[j],
+ lmt_addr + 4,
+ &cmd0[j], &cmd1[j],
+ segdw[j], flags);
+ vst1q_u64(lmt_addr, cmd0[j]);
+ vst1q_u64(lmt_addr + 2, cmd1[j]);
+ }
+ *data128 |= ((__uint128_t)(segdw[j]) - 1) << *shift;
+ *shift += 3;
+ j++;
+ }
+ lmt_used++;
+ lmt_addr += 16;
+ }
+
+ return lmt_used;
+}
+
+static __rte_always_inline void
+cn10k_nix_lmt_next(uint8_t dw, uintptr_t laddr, uint8_t *lnum, uint8_t *loff,
+ uint8_t *shift, __uint128_t *data128, uintptr_t *next)
+{
+ /* Go to next line if we are out of space */
+ if ((*loff + (dw << 4)) > 128) {
+ *data128 = *data128 |
+ (((__uint128_t)((*loff >> 4) - 1)) << *shift);
+ *shift = *shift + 3;
+ *loff = 0;
+ *lnum = *lnum + 1;
+ }
+
+ *next = (uintptr_t)LMT_OFF(laddr, *lnum, *loff);
+ *loff = *loff + (dw << 4);
+}
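+
+/* Example trace (illustrative, dw in 16B units): packets of dw = 4,
+ * 4 and 2, starting from lnum = 0, loff = 0, shift = 16:
+ *   pkt0: fits     -> next = line0 + 0,   loff = 64
+ *   pkt1: fits     -> next = line0 + 64,  loff = 128 (line now full)
+ *   pkt2: overflow -> data128 |= (8 - 1) << 16, shift = 19, lnum = 1,
+ *                     next = line1 + 0,   loff = 32
+ * Each 3-bit field of data128 thus records (dwords - 1) of a completed
+ * line for the final LMTST data word.
+ */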
+
+static __rte_always_inline void
+cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr,
+ uint64x2_t cmd0, uint64x2_t cmd1, uint64x2_t cmd2,
+ uint64x2_t cmd3, const uint16_t flags)
+{
+ uint8_t off;
+
+ /* Handle no fast free when security is enabled without mseg */
+ if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
+ (flags & NIX_TX_OFFLOAD_SECURITY_F) &&
+ !(flags & NIX_TX_MULTI_SEG_F)) {
+ union nix_send_sg_s sg;
+
+ sg.u = vgetq_lane_u64(cmd1, 0);
+ sg.u |= (cnxk_nix_prefree_seg(mbuf) << 55);
+ cmd1 = vsetq_lane_u64(sg.u, cmd1, 0);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sg.u = vgetq_lane_u64(cmd1, 0);
+ if (!(sg.u & (1ULL << 55)))
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1,
+ 0);
+ rte_io_wmb();
+#endif
+ }
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ if ((flags & NIX_TX_NEED_EXT_HDR) &&
+ (flags & NIX_TX_OFFLOAD_TSTAMP_F)) {
+ cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 48),
+ &cmd0, &cmd1, segdw, flags);
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ off = segdw - 4;
+ off <<= 4;
+ vst1q_u64(LMT_OFF(laddr, 0, 48 + off), cmd3);
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
+ cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 48),
+ &cmd0, &cmd1, segdw, flags);
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ } else {
+ cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 32),
+ &cmd0, &cmd1, segdw, flags);
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1);
+ }
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
+ /* Store the prepared send desc to LMT lines */
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ vst1q_u64(LMT_OFF(laddr, 0, 48), cmd3);
+ } else {
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+ vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+ }
+ } else {
+ /* Store the prepared send desc to LMT lines */
+ vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+ vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1);
+ }
+}
+
static __rte_always_inline uint16_t
-cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
+ struct rte_mbuf **tx_pkts, uint16_t pkts,
+ uint64_t *cmd, const uint16_t flags)
{
uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
uint64x2_t cmd0[NIX_DESCS_PER_LOOP], cmd1[NIX_DESCS_PER_LOOP],
- cmd2[NIX_DESCS_PER_LOOP];
- uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3, data, pa;
+ cmd2[NIX_DESCS_PER_LOOP], cmd3[NIX_DESCS_PER_LOOP];
+ uint16_t left, scalar, burst, i, lmt_id, c_lmt_id;
+ uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3, pa;
uint64x2_t senddesc01_w0, senddesc23_w0;
uint64x2_t senddesc01_w1, senddesc23_w1;
- uint16_t left, scalar, burst, i, lmt_id;
uint64x2_t sendext01_w0, sendext23_w0;
uint64x2_t sendext01_w1, sendext23_w1;
+ uint64x2_t sendmem01_w0, sendmem23_w0;
+ uint64x2_t sendmem01_w1, sendmem23_w1;
+ uint8_t segdw[NIX_DESCS_PER_LOOP + 1];
uint64x2_t sgdesc01_w0, sgdesc23_w0;
uint64x2_t sgdesc01_w1, sgdesc23_w1;
struct cn10k_eth_txq *txq = tx_queue;
- uintptr_t laddr = txq->lmt_base;
rte_iova_t io_addr = txq->io_addr;
+ uintptr_t laddr = txq->lmt_base;
+ uint8_t c_lnum, c_shft, c_loff;
uint64x2_t ltypes01, ltypes23;
uint64x2_t xtmp128, ytmp128;
uint64x2_t xmask01, xmask23;
- uint8_t lnum;
-
- NIX_XMIT_FC_OR_RETURN(txq, pkts);
+ uintptr_t c_laddr = laddr;
+ uint8_t lnum, shift, loff;
+ rte_iova_t c_io_addr;
+ uint64_t sa_base;
+ union wdata {
+ __uint128_t data128;
+ uint64_t data[2];
+ } wd;
+
+ if (!(flags & NIX_TX_VWQE_F)) {
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+ scalar = pkts & (NIX_DESCS_PER_LOOP - 1);
+ pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+ } else {
+ scalar = pkts & (NIX_DESCS_PER_LOOP - 1);
+ pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
+ }
- scalar = pkts & (NIX_DESCS_PER_LOOP - 1);
- pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
+ /* Perform header writes before barrier for TSO */
+ if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ for (i = 0; i < pkts; i++)
+ cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
+ }
- /* Reduce the cached count */
- txq->fc_cache_pkts -= pkts;
+ if (!(flags & NIX_TX_VWQE_F)) {
+ senddesc01_w0 = vld1q_dup_u64(&txq->send_hdr_w0);
+ } else {
+ uint64_t w0 =
+ (txq->send_hdr_w0 & 0xFFFFF00000000000) |
+ ((uint64_t)(cn10k_nix_tx_ext_subs(flags) + 1) << 40);
- senddesc01_w0 = vld1q_dup_u64(&txq->send_hdr_w0);
+ senddesc01_w0 = vdupq_n_u64(w0);
+ }
senddesc23_w0 = senddesc01_w0;
+
senddesc01_w1 = vdupq_n_u64(0);
senddesc23_w1 = senddesc01_w1;
- sgdesc01_w0 = vld1q_dup_u64(&txq->sg_w0);
+ sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
sgdesc23_w0 = sgdesc01_w0;
- /* Load command defaults into vector variables. */
if (flags & NIX_TX_NEED_EXT_HDR) {
- sendext01_w0 = vld1q_dup_u64(&txq->cmd[0]);
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ sendext01_w0 = vdupq_n_u64((NIX_SUBDC_EXT << 60) |
+ BIT_ULL(15));
+ sendmem01_w0 =
+ vdupq_n_u64((NIX_SUBDC_MEM << 60) |
+ (NIX_SENDMEMALG_SETTSTMP << 56));
+ sendmem23_w0 = sendmem01_w0;
+ sendmem01_w1 = vdupq_n_u64(txq->ts_mem);
+ sendmem23_w1 = sendmem01_w1;
+ } else {
+ sendext01_w0 = vdupq_n_u64((NIX_SUBDC_EXT << 60));
+ }
sendext23_w0 = sendext01_w0;
- sendext01_w1 = vdupq_n_u64(12 | 12U << 24);
+
+ if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)
+ sendext01_w1 = vdupq_n_u64(12 | 12U << 24);
+ else
+ sendext01_w1 = vdupq_n_u64(0);
sendext23_w1 = sendext01_w1;
}
/* Get LMT base address and LMT ID as lcore id */
ROC_LMT_BASE_ID_GET(laddr, lmt_id);
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ ROC_LMT_CPT_BASE_ID_GET(c_laddr, c_lmt_id);
+ c_io_addr = txq->cpt_io_addr;
+ sa_base = txq->sa_base;
+ }
+
left = pkts;
again:
/* Number of packets to prepare depends on offloads enabled. */
burst = left > cn10k_nix_pkts_per_vec_brst(flags) ?
cn10k_nix_pkts_per_vec_brst(flags) :
left;
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)) {
+ wd.data128 = 0;
+ shift = 16;
+ }
lnum = 0;
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ loff = 0;
+ c_loff = 0;
+ c_lnum = 0;
+ c_shft = 16;
+ }
+
for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) {
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) {
+ burst = i;
+ break;
+ }
+
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ uint8_t j;
+
+ for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
+ struct rte_mbuf *m = tx_pkts[j];
+
+ /* Get dwords based on nb_segs. */
+ segdw[j] = NIX_NB_SEGS_TO_SEGDW(m->nb_segs);
+ /* Add dwords based on offloads. */
+ segdw[j] += 1 + /* SEND HDR */
+ !!(flags & NIX_TX_NEED_EXT_HDR) +
+ !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+ }
+
+ /* Check if there are enough LMTLINES for this loop */
+ if (lnum + 4 > 32) {
+ uint8_t ldwords_con = 0, lneeded = 0;
+ for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
+ ldwords_con += segdw[j];
+ if (ldwords_con > 8) {
+ lneeded += 1;
+ ldwords_con = segdw[j];
+ }
+ }
+ lneeded += 1;
+ if (lnum + lneeded > 32) {
+ burst = i;
+ break;
+ }
+ }
+ }
/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
senddesc01_w0 =
vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
sendext23_w1 = sendext01_w1;
}
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+			/* Reset send mem alg to SETTSTMP from SUB */
+ sendmem01_w0 = vbicq_u64(sendmem01_w0,
+ vdupq_n_u64(BIT_ULL(59)));
+ /* Reset send mem address to default. */
+ sendmem01_w1 =
+ vbicq_u64(sendmem01_w1, vdupq_n_u64(0xF));
+ sendmem23_w0 = sendmem01_w0;
+ sendmem23_w1 = sendmem01_w1;
+ }
+
+ if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ /* Clear the LSO enable bit. */
+ sendext01_w0 = vbicq_u64(sendext01_w0,
+ vdupq_n_u64(BIT_ULL(14)));
+ sendext23_w0 = sendext01_w0;
+ }
+
/* Move mbufs to iova */
mbuf0 = (uint64_t *)tx_pkts[0];
mbuf1 = (uint64_t *)tx_pkts[1];
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
- 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
- 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
- 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
- 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
- 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
};
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 |
- * PKT_TX_TCP_CKSUM
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x22, /* PKT_TX_IPV4 |
- * PKT_TX_SCTP_CKSUM
+ 0x22, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x32, /* PKT_TX_IPV4 |
- * PKT_TX_UDP_CKSUM
+ 0x32, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x03, /* PKT_TX_IPV4 |
- * PKT_TX_IP_CKSUM
+ 0x03, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_IP_CKSUM
*/
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
},
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
/* Tx ol_flag for vlan. */
- const uint64x2_t olv = {PKT_TX_VLAN, PKT_TX_VLAN};
+ const uint64x2_t olv = {RTE_MBUF_F_TX_VLAN, RTE_MBUF_F_TX_VLAN};
/* Bit enable for VLAN1 */
const uint64x2_t mlv = {BIT_ULL(49), BIT_ULL(49)};
			/* Tx ol_flag for QinQ. */
- const uint64x2_t olq = {PKT_TX_QINQ, PKT_TX_QINQ};
+ const uint64x2_t olq = {RTE_MBUF_F_TX_QINQ, RTE_MBUF_F_TX_QINQ};
/* Bit enable for VLAN0 */
const uint64x2_t mlq = {BIT_ULL(48), BIT_ULL(48)};
/* Load vlan values from packet. outer is VLAN 0 */
sendext23_w1 = vorrq_u64(sendext23_w1, ytmp128);
}
- if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ /* Tx ol_flag for timestamp. */
+ const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
+ RTE_MBUF_F_TX_IEEE1588_TMST};
+ /* Set send mem alg to SUB. */
+ const uint64x2_t alg = {BIT_ULL(59), BIT_ULL(59)};
+ /* Increment send mem address by 8. */
+ const uint64x2_t addr = {0x8, 0x8};
+
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ /* Check if timestamp is requested and generate inverted
+ * mask as we need not make any changes to default cmd
+ * value.
+ */
+ xtmp128 = vmvnq_u32(vtstq_u64(olf, xtmp128));
+ ytmp128 = vmvnq_u32(vtstq_u64(olf, ytmp128));
+
+ /* Change send mem address to an 8 byte offset when
+ * TSTMP is disabled.
+ */
+ sendmem01_w1 = vaddq_u64(sendmem01_w1,
+ vandq_u64(xtmp128, addr));
+ sendmem23_w1 = vaddq_u64(sendmem23_w1,
+ vandq_u64(ytmp128, addr));
+ /* Change send mem alg to SUB when TSTMP is disabled. */
+ sendmem01_w0 = vorrq_u64(sendmem01_w0,
+ vandq_u64(xtmp128, alg));
+ sendmem23_w0 = vorrq_u64(sendmem23_w0,
+ vandq_u64(ytmp128, alg));
+
+ cmd3[0] = vzip1q_u64(sendmem01_w0, sendmem01_w1);
+ cmd3[1] = vzip2q_u64(sendmem01_w0, sendmem01_w1);
+ cmd3[2] = vzip1q_u64(sendmem23_w0, sendmem23_w1);
+ cmd3[3] = vzip2q_u64(sendmem23_w0, sendmem23_w1);
+ }
+
+ if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ const uint64_t lso_fmt = txq->lso_tun_fmt;
+ uint64_t sx_w0[NIX_DESCS_PER_LOOP];
+ uint64_t sd_w1[NIX_DESCS_PER_LOOP];
+
+ /* Extract SD W1 as we need to set L4 types. */
+ vst1q_u64(sd_w1, senddesc01_w1);
+ vst1q_u64(sd_w1 + 2, senddesc23_w1);
+
+ /* Extract SX W0 as we need to set LSO fields. */
+ vst1q_u64(sx_w0, sendext01_w0);
+ vst1q_u64(sx_w0 + 2, sendext23_w0);
+
+ /* Extract ol_flags. */
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ /* Prepare individual mbufs. */
+ cn10k_nix_prepare_tso(tx_pkts[0],
+ (union nix_send_hdr_w1_u *)&sd_w1[0],
+ (union nix_send_ext_w0_u *)&sx_w0[0],
+ vgetq_lane_u64(xtmp128, 0), flags, lso_fmt);
+
+ cn10k_nix_prepare_tso(tx_pkts[1],
+ (union nix_send_hdr_w1_u *)&sd_w1[1],
+ (union nix_send_ext_w0_u *)&sx_w0[1],
+ vgetq_lane_u64(xtmp128, 1), flags, lso_fmt);
+
+ cn10k_nix_prepare_tso(tx_pkts[2],
+ (union nix_send_hdr_w1_u *)&sd_w1[2],
+ (union nix_send_ext_w0_u *)&sx_w0[2],
+ vgetq_lane_u64(ytmp128, 0), flags, lso_fmt);
+
+ cn10k_nix_prepare_tso(tx_pkts[3],
+ (union nix_send_hdr_w1_u *)&sd_w1[3],
+ (union nix_send_ext_w0_u *)&sx_w0[3],
+ vgetq_lane_u64(ytmp128, 1), flags, lso_fmt);
+
+ senddesc01_w1 = vld1q_u64(sd_w1);
+ senddesc23_w1 = vld1q_u64(sd_w1 + 2);
+
+ sendext01_w0 = vld1q_u64(sx_w0);
+ sendext23_w0 = vld1q_u64(sx_w0 + 2);
+ }
+
+ if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
+ !(flags & NIX_TX_MULTI_SEG_F) &&
+ !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
/* Set don't free bit if reference count > 1 */
xmask01 = vdupq_n_u64(0);
xmask23 = xmask01;
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 0);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf0)->pool,
(void **)&mbuf0, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 1);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf1)->pool,
(void **)&mbuf1, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 0);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf2)->pool,
(void **)&mbuf2, 1, 0);
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 1);
else
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf3)->pool,
(void **)&mbuf3, 1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
- } else {
+ } else if (!(flags & NIX_TX_MULTI_SEG_F) &&
+ !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
/* Move mbufs to iova */
mbuf0 = (uint64_t *)tx_pkts[0];
mbuf1 = (uint64_t *)tx_pkts[1];
/* Mark mempool object as "put" since
* it is freed by NIX
*/
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf0)->pool,
(void **)&mbuf0, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf1)->pool,
(void **)&mbuf1, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf2)->pool,
(void **)&mbuf2, 1, 0);
- __mempool_check_cookies(
+ RTE_MEMPOOL_CHECK_COOKIES(
((struct rte_mbuf *)mbuf3)->pool,
(void **)&mbuf3, 1, 0);
}
cmd2[3] = vzip2q_u64(sendext23_w0, sendext23_w1);
}
- if (flags & NIX_TX_NEED_EXT_HDR) {
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ const uint64x2_t olf = {RTE_MBUF_F_TX_SEC_OFFLOAD,
+ RTE_MBUF_F_TX_SEC_OFFLOAD};
+ uintptr_t next;
+ uint8_t dw;
+
+ /* Extract ol_flags. */
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ xtmp128 = vtstq_u64(olf, xtmp128);
+ ytmp128 = vtstq_u64(olf, ytmp128);
+
+ /* Process mbuf0 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[0]);
+ if (vgetq_lane_u64(xtmp128, 0))
+ cn10k_nix_prep_sec_vec(tx_pkts[0], &cmd0[0],
+ &cmd1[0], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf0 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[0], segdw[0], next,
+ cmd0[0], cmd1[0], cmd2[0], cmd3[0],
+ flags);
+
+ /* Process mbuf1 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[1]);
+ if (vgetq_lane_u64(xtmp128, 1))
+ cn10k_nix_prep_sec_vec(tx_pkts[1], &cmd0[1],
+ &cmd1[1], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf1 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[1], segdw[1], next,
+ cmd0[1], cmd1[1], cmd2[1], cmd3[1],
+ flags);
+
+ /* Process mbuf2 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[2]);
+ if (vgetq_lane_u64(ytmp128, 0))
+ cn10k_nix_prep_sec_vec(tx_pkts[2], &cmd0[2],
+ &cmd1[2], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf2 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[2], segdw[2], next,
+ cmd0[2], cmd1[2], cmd2[2], cmd3[2],
+ flags);
+
+ /* Process mbuf3 */
+ dw = cn10k_nix_tx_dwords(flags, segdw[3]);
+ if (vgetq_lane_u64(ytmp128, 1))
+ cn10k_nix_prep_sec_vec(tx_pkts[3], &cmd0[3],
+ &cmd1[3], &next, c_laddr,
+ &c_lnum, &c_loff,
+ &c_shft, sa_base, flags);
+ else
+ cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+ &shift, &wd.data128, &next);
+
+ /* Store mbuf3 to LMTLINE/CPT NIXTX area */
+ cn10k_nix_xmit_store(tx_pkts[3], segdw[3], next,
+ cmd0[3], cmd1[3], cmd2[3], cmd3[3],
+ flags);
+
+ } else if (flags & NIX_TX_MULTI_SEG_F) {
+ uint8_t j;
+
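+ /* Sentinel: an 8-dword entry can never share an LMT line, so the
+ * packing helper stops pairing at mbuf3.
+ */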
+ segdw[4] = 8;
+ j = cn10k_nix_prep_lmt_mseg_vector(
+ tx_pkts, cmd0, cmd1, cmd2, cmd3, segdw,
+ (uint64_t *)LMT_OFF(laddr, lnum, 0),
+ &wd.data128, &shift, flags);
+ lnum += j;
+ } else if (flags & NIX_TX_NEED_EXT_HDR) {
/* Store the prepared send desc to LMT lines */
- vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[0]);
- vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd2[0]);
- vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd1[0]);
- vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd0[1]);
- vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd2[1]);
- vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd1[1]);
- lnum += 1;
- vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[2]);
- vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd2[2]);
- vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd1[2]);
- vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd0[3]);
- vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd2[3]);
- vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd1[3]);
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd2[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd1[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd3[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd0[1]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd2[1]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 96), cmd1[1]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 112), cmd3[1]);
+ lnum += 1;
+ vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd2[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd1[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd3[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd0[3]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd2[3]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 96), cmd1[3]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 112), cmd3[3]);
+ } else {
+ vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd2[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd1[0]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd0[1]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd2[1]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd1[1]);
+ lnum += 1;
+ vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd2[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd1[2]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd0[3]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd2[3]);
+ vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd1[3]);
+ }
lnum += 1;
} else {
/* Store the prepared send desc to LMT lines */
vst1q_u64(LMT_OFF(laddr, lnum, 0), cmd0[0]);
vst1q_u64(LMT_OFF(laddr, lnum, 16), cmd1[0]);
vst1q_u64(LMT_OFF(laddr, lnum, 32), cmd0[1]);
vst1q_u64(LMT_OFF(laddr, lnum, 48), cmd1[1]);
vst1q_u64(LMT_OFF(laddr, lnum, 64), cmd0[2]);
vst1q_u64(LMT_OFF(laddr, lnum, 80), cmd1[2]);
vst1q_u64(LMT_OFF(laddr, lnum, 96), cmd0[3]);
vst1q_u64(LMT_OFF(laddr, lnum, 112), cmd1[3]);
lnum += 1;
}
+ if (flags & NIX_TX_MULTI_SEG_F) {
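+ /* Chains are fully encoded in the SG lists above; break the
+ * links so each segment can be freed individually.
+ */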
+ tx_pkts[0]->next = NULL;
+ tx_pkts[1]->next = NULL;
+ tx_pkts[2]->next = NULL;
+ tx_pkts[3]->next = NULL;
+ }
+
tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
}
+ /* Round up lnum to the last line if it is partial */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ lnum = lnum + !!loff;
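+ /* The 3-bit per-line size fields count 16B units minus one;
+ * record the size of the trailing partial line.
+ */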
+ wd.data128 = wd.data128 |
+ (((__uint128_t)(((loff >> 4) - 1) & 0x7) << shift));
+ }
+
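+ /* Size fields accumulated in wd.data128 start at bit 16; move the
+ * first line's field down to bits [2:0] so it can be folded into
+ * the LMTST I/O address below.
+ */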
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
+ wd.data[0] >>= 16;
+
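+ /* For VWQE, wait until this HWS is at the SSO head before issuing
+ * LMTSTs (head status assumed to be cached in ws[1] bit 35).
+ */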
+ if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35)))
+ ws[1] = roc_sso_hws_head_wait(ws[0]);
+
+ left -= burst;
+
+ /* Submit CPT instructions if any */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+ c_shft);
+
/* Trigger LMTST */
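+ /* One STEORL covers at most 16 LMT lines; issue two submissions
+ * when more lines were filled.
+ */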
if (lnum > 16) {
- data = cn10k_nix_tx_steor_vec_data(flags);
- pa = io_addr | (data & 0x7) << 4;
- data &= ~0x7ULL;
- data |= (15ULL << 12);
- data |= (uint64_t)lmt_id;
+ if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
+ wd.data[0] = cn10k_nix_tx_steor_vec_data(flags);
+
+ pa = io_addr | (wd.data[0] & 0x7) << 4;
+ wd.data[0] &= ~0x7ULL;
+
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
+ wd.data[0] <<= 16;
+
+ wd.data[0] |= (15ULL << 12);
+ wd.data[0] |= (uint64_t)lmt_id;
/* STEOR0 */
- roc_lmt_submit_steorl(data, pa);
+ roc_lmt_submit_steorl(wd.data[0], pa);
- data = cn10k_nix_tx_steor_vec_data(flags);
- pa = io_addr | (data & 0x7) << 4;
- data &= ~0x7ULL;
- data |= ((uint64_t)(lnum - 17)) << 12;
- data |= (uint64_t)(lmt_id + 16);
+ if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
+ wd.data[1] = cn10k_nix_tx_steor_vec_data(flags);
+
+ pa = io_addr | (wd.data[1] & 0x7) << 4;
+ wd.data[1] &= ~0x7ULL;
+
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
+ wd.data[1] <<= 16;
+
+ wd.data[1] |= ((uint64_t)(lnum - 17)) << 12;
+ wd.data[1] |= (uint64_t)(lmt_id + 16);
/* STEOR1 */
- roc_lmt_submit_steorl(data, pa);
+ roc_lmt_submit_steorl(wd.data[1], pa);
} else if (lnum) {
- data = cn10k_nix_tx_steor_vec_data(flags);
- pa = io_addr | (data & 0x7) << 4;
- data &= ~0x7ULL;
- data |= ((uint64_t)(lnum - 1)) << 12;
- data |= lmt_id;
+ if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
+ wd.data[0] = cn10k_nix_tx_steor_vec_data(flags);
+
+ pa = io_addr | (wd.data[0] & 0x7) << 4;
+ wd.data[0] &= ~0x7ULL;
+
+ if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
+ wd.data[0] <<= 16;
+
+ wd.data[0] |= ((uint64_t)(lnum - 1)) << 12;
+ wd.data[0] |= lmt_id;
/* STEOR0 */
- roc_lmt_submit_steorl(data, pa);
+ roc_lmt_submit_steorl(wd.data[0], pa);
}
- left -= burst;
rte_io_wmb();
if (left)
goto again;
- if (unlikely(scalar))
- pkts += cn10k_nix_xmit_pkts(tx_queue, tx_pkts, scalar, cmd,
- flags);
+ if (unlikely(scalar)) {
+ if (flags & NIX_TX_MULTI_SEG_F)
+ pkts += cn10k_nix_xmit_pkts_mseg(tx_queue, ws, tx_pkts,
+ scalar, cmd, flags);
+ else
+ pkts += cn10k_nix_xmit_pkts(tx_queue, ws, tx_pkts,
+ scalar, cmd, flags);
+ }
return pkts;
}
#else
static __rte_always_inline uint16_t
-cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
+ struct rte_mbuf **tx_pkts, uint16_t pkts,
+ uint64_t *cmd, const uint16_t flags)
{
+ RTE_SET_USED(ws);
RTE_SET_USED(tx_queue);
RTE_SET_USED(tx_pkts);
RTE_SET_USED(pkts);
RTE_SET_USED(cmd);
RTE_SET_USED(flags);
return 0;
}
#endif

#define L3L4CSUM_F NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F
#define NOFF_F NIX_TX_OFFLOAD_MBUF_NOFF_F
#define TSO_F NIX_TX_OFFLOAD_TSO_F
#define TSP_F NIX_TX_OFFLOAD_TSTAMP_F
-
-/* [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
-#define NIX_TX_FASTPATH_MODES \
-T(no_offload, 0, 0, 0, 0, 0, 0, 4, \
- NIX_TX_OFFLOAD_NONE) \
-T(l3l4csum, 0, 0, 0, 0, 0, 1, 4, \
- L3L4CSUM_F) \
-T(ol3ol4csum, 0, 0, 0, 0, 1, 0, 4, \
- OL3OL4CSUM_F) \
-T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 4, \
- OL3OL4CSUM_F | L3L4CSUM_F) \
-T(vlan, 0, 0, 0, 1, 0, 0, 6, \
- VLAN_F) \
-T(vlan_l3l4csum, 0, 0, 0, 1, 0, 1, 6, \
- VLAN_F | L3L4CSUM_F) \
-T(vlan_ol3ol4csum, 0, 0, 0, 1, 1, 0, 6, \
- VLAN_F | OL3OL4CSUM_F) \
-T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 6, \
- VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff, 0, 0, 1, 0, 0, 0, 4, \
- NOFF_F) \
-T(noff_l3l4csum, 0, 0, 1, 0, 0, 1, 4, \
- NOFF_F | L3L4CSUM_F) \
-T(noff_ol3ol4csum, 0, 0, 1, 0, 1, 0, 4, \
- NOFF_F | OL3OL4CSUM_F) \
-T(noff_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 4, \
- NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(noff_vlan, 0, 0, 1, 1, 0, 0, 6, \
- NOFF_F | VLAN_F) \
-T(noff_vlan_l3l4csum, 0, 0, 1, 1, 0, 1, 6, \
- NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 0, 6, \
- NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 6, \
- NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso, 0, 1, 0, 0, 0, 0, 6, \
- TSO_F) \
-T(tso_l3l4csum, 0, 1, 0, 0, 0, 1, 6, \
- TSO_F | L3L4CSUM_F) \
-T(tso_ol3ol4csum, 0, 1, 0, 0, 1, 0, 6, \
- TSO_F | OL3OL4CSUM_F) \
-T(tso_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 6, \
- TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_vlan, 0, 1, 0, 1, 0, 0, 6, \
- TSO_F | VLAN_F) \
-T(tso_vlan_l3l4csum, 0, 1, 0, 1, 0, 1, 6, \
- TSO_F | VLAN_F | L3L4CSUM_F) \
-T(tso_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 0, 6, \
- TSO_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 6, \
- TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff, 0, 1, 1, 0, 0, 0, 6, \
- TSO_F | NOFF_F) \
-T(tso_noff_l3l4csum, 0, 1, 1, 0, 0, 1, 6, \
- TSO_F | NOFF_F | L3L4CSUM_F) \
-T(tso_noff_ol3ol4csum, 0, 1, 1, 0, 1, 0, 6, \
- TSO_F | NOFF_F | OL3OL4CSUM_F) \
-T(tso_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 6, \
- TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(tso_noff_vlan, 0, 1, 1, 1, 0, 0, 6, \
- TSO_F | NOFF_F | VLAN_F) \
-T(tso_noff_vlan_l3l4csum, 0, 1, 1, 1, 0, 1, 6, \
- TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 0, 6, \
- TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 6, \
- TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts, 1, 0, 0, 0, 0, 0, 8, \
- TSP_F) \
-T(ts_l3l4csum, 1, 0, 0, 0, 0, 1, 8, \
- TSP_F | L3L4CSUM_F) \
-T(ts_ol3ol4csum, 1, 0, 0, 0, 1, 0, 8, \
- TSP_F | OL3OL4CSUM_F) \
-T(ts_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 8, \
- TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_vlan, 1, 0, 0, 1, 0, 0, 8, \
- TSP_F | VLAN_F) \
-T(ts_vlan_l3l4csum, 1, 0, 0, 1, 0, 1, 8, \
- TSP_F | VLAN_F | L3L4CSUM_F) \
-T(ts_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 0, 8, \
- TSP_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 8, \
- TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_noff, 1, 0, 1, 0, 0, 0, 8, \
- TSP_F | NOFF_F) \
-T(ts_noff_l3l4csum, 1, 0, 1, 0, 0, 1, 8, \
- TSP_F | NOFF_F | L3L4CSUM_F) \
-T(ts_noff_ol3ol4csum, 1, 0, 1, 0, 1, 0, 8, \
- TSP_F | NOFF_F | OL3OL4CSUM_F) \
-T(ts_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 8, \
- TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_noff_vlan, 1, 0, 1, 1, 0, 0, 8, \
- TSP_F | NOFF_F | VLAN_F) \
-T(ts_noff_vlan_l3l4csum, 1, 0, 1, 1, 0, 1, 8, \
- TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(ts_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 0, 8, \
- TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 8, \
- TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso, 1, 1, 0, 0, 0, 0, 8, \
- TSP_F | TSO_F) \
-T(ts_tso_l3l4csum, 1, 1, 0, 0, 0, 1, 8, \
- TSP_F | TSO_F | L3L4CSUM_F) \
-T(ts_tso_ol3ol4csum, 1, 1, 0, 0, 1, 0, 8, \
- TSP_F | TSO_F | OL3OL4CSUM_F) \
-T(ts_tso_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 8, \
- TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso_vlan, 1, 1, 0, 1, 0, 0, 8, \
- TSP_F | TSO_F | VLAN_F) \
-T(ts_tso_vlan_l3l4csum, 1, 1, 0, 1, 0, 1, 8, \
- TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
-T(ts_tso_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 0, 8, \
- TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 1, 1, 8, \
- TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso_noff, 1, 1, 1, 0, 0, 0, 8, \
- TSP_F | TSO_F | NOFF_F) \
-T(ts_tso_noff_l3l4csum, 1, 1, 1, 0, 0, 1, 8, \
- TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
-T(ts_tso_noff_ol3ol4csum, 1, 1, 1, 0, 1, 0, 8, \
- TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
-T(ts_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 8, \
- TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
-T(ts_tso_noff_vlan, 1, 1, 1, 1, 0, 0, 8, \
- TSP_F | TSO_F | NOFF_F | VLAN_F) \
-T(ts_tso_noff_vlan_l3l4csum, 1, 1, 1, 1, 0, 1, 8, \
- TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
-T(ts_tso_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 0, 8, \
- TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
-T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 8, \
- TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
-
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
+#define T_SEC_F NIX_TX_OFFLOAD_SECURITY_F
+
+/* [T_SEC_F] [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+#define NIX_TX_FASTPATH_MODES_0_15 \
+ T(no_offload, 4, NIX_TX_OFFLOAD_NONE) \
+ T(l3l4csum, 4, L3L4CSUM_F) \
+ T(ol3ol4csum, 4, OL3OL4CSUM_F) \
+ T(ol3ol4csum_l3l4csum, 4, OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(vlan, 6, VLAN_F) \
+ T(vlan_l3l4csum, 6, VLAN_F | L3L4CSUM_F) \
+ T(vlan_ol3ol4csum, 6, VLAN_F | OL3OL4CSUM_F) \
+ T(vlan_ol3ol4csum_l3l4csum, 6, VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(noff, 4, NOFF_F) \
+ T(noff_l3l4csum, 4, NOFF_F | L3L4CSUM_F) \
+ T(noff_ol3ol4csum, 4, NOFF_F | OL3OL4CSUM_F) \
+ T(noff_ol3ol4csum_l3l4csum, 4, NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(noff_vlan, 6, NOFF_F | VLAN_F) \
+ T(noff_vlan_l3l4csum, 6, NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(noff_vlan_ol3ol4csum, 6, NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(noff_vlan_ol3ol4csum_l3l4csum, 6, \
+ NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_16_31 \
+ T(tso, 6, TSO_F) \
+ T(tso_l3l4csum, 6, TSO_F | L3L4CSUM_F) \
+ T(tso_ol3ol4csum, 6, TSO_F | OL3OL4CSUM_F) \
+ T(tso_ol3ol4csum_l3l4csum, 6, TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(tso_vlan, 6, TSO_F | VLAN_F) \
+ T(tso_vlan_l3l4csum, 6, TSO_F | VLAN_F | L3L4CSUM_F) \
+ T(tso_vlan_ol3ol4csum, 6, TSO_F | VLAN_F | OL3OL4CSUM_F) \
+ T(tso_vlan_ol3ol4csum_l3l4csum, 6, \
+ TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(tso_noff, 6, TSO_F | NOFF_F) \
+ T(tso_noff_l3l4csum, 6, TSO_F | NOFF_F | L3L4CSUM_F) \
+ T(tso_noff_ol3ol4csum, 6, TSO_F | NOFF_F | OL3OL4CSUM_F) \
+ T(tso_noff_ol3ol4csum_l3l4csum, 6, \
+ TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(tso_noff_vlan, 6, TSO_F | NOFF_F | VLAN_F) \
+ T(tso_noff_vlan_l3l4csum, 6, TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(tso_noff_vlan_ol3ol4csum, 6, TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(tso_noff_vlan_ol3ol4csum_l3l4csum, 6, \
+ TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_32_47 \
+ T(ts, 8, TSP_F) \
+ T(ts_l3l4csum, 8, TSP_F | L3L4CSUM_F) \
+ T(ts_ol3ol4csum, 8, TSP_F | OL3OL4CSUM_F) \
+ T(ts_ol3ol4csum_l3l4csum, 8, TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(ts_vlan, 8, TSP_F | VLAN_F) \
+ T(ts_vlan_l3l4csum, 8, TSP_F | VLAN_F | L3L4CSUM_F) \
+ T(ts_vlan_ol3ol4csum, 8, TSP_F | VLAN_F | OL3OL4CSUM_F) \
+ T(ts_vlan_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(ts_noff, 8, TSP_F | NOFF_F) \
+ T(ts_noff_l3l4csum, 8, TSP_F | NOFF_F | L3L4CSUM_F) \
+ T(ts_noff_ol3ol4csum, 8, TSP_F | NOFF_F | OL3OL4CSUM_F) \
+ T(ts_noff_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(ts_noff_vlan, 8, TSP_F | NOFF_F | VLAN_F) \
+ T(ts_noff_vlan_l3l4csum, 8, TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(ts_noff_vlan_ol3ol4csum, 8, TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(ts_noff_vlan_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_48_63 \
+ T(ts_tso, 8, TSP_F | TSO_F) \
+ T(ts_tso_l3l4csum, 8, TSP_F | TSO_F | L3L4CSUM_F) \
+ T(ts_tso_ol3ol4csum, 8, TSP_F | TSO_F | OL3OL4CSUM_F) \
+ T(ts_tso_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(ts_tso_vlan, 8, TSP_F | TSO_F | VLAN_F) \
+ T(ts_tso_vlan_l3l4csum, 8, TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+ T(ts_tso_vlan_ol3ol4csum, 8, TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+ T(ts_tso_vlan_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(ts_tso_noff, 8, TSP_F | TSO_F | NOFF_F) \
+ T(ts_tso_noff_l3l4csum, 8, TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+ T(ts_tso_noff_ol3ol4csum, 8, TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+ T(ts_tso_noff_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(ts_tso_noff_vlan, 8, TSP_F | TSO_F | NOFF_F | VLAN_F) \
+ T(ts_tso_noff_vlan_l3l4csum, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(ts_tso_noff_vlan_ol3ol4csum, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 8, \
+ TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_64_79 \
+ T(sec, 4, T_SEC_F) \
+ T(sec_l3l4csum, 4, T_SEC_F | L3L4CSUM_F) \
+ T(sec_ol3ol4csum, 4, T_SEC_F | OL3OL4CSUM_F) \
+ T(sec_ol3ol4csum_l3l4csum, 4, T_SEC_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_vlan, 6, T_SEC_F | VLAN_F) \
+ T(sec_vlan_l3l4csum, 6, T_SEC_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_vlan_ol3ol4csum, 6, T_SEC_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_vlan_ol3ol4csum_l3l4csum, 6, \
+ T_SEC_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_noff, 4, T_SEC_F | NOFF_F) \
+ T(sec_noff_l3l4csum, 4, T_SEC_F | NOFF_F | L3L4CSUM_F) \
+ T(sec_noff_ol3ol4csum, 4, T_SEC_F | NOFF_F | OL3OL4CSUM_F) \
+ T(sec_noff_ol3ol4csum_l3l4csum, 4, \
+ T_SEC_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_noff_vlan, 6, T_SEC_F | NOFF_F | VLAN_F) \
+ T(sec_noff_vlan_l3l4csum, 6, T_SEC_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_noff_vlan_ol3ol4csum, 6, \
+ T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_noff_vlan_ol3ol4csum_l3l4csum, 6, \
+ T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_80_95 \
+ T(sec_tso, 6, T_SEC_F | TSO_F) \
+ T(sec_tso_l3l4csum, 6, T_SEC_F | TSO_F | L3L4CSUM_F) \
+ T(sec_tso_ol3ol4csum, 6, T_SEC_F | TSO_F | OL3OL4CSUM_F) \
+ T(sec_tso_ol3ol4csum_l3l4csum, 6, \
+ T_SEC_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_tso_vlan, 6, T_SEC_F | TSO_F | VLAN_F) \
+ T(sec_tso_vlan_l3l4csum, 6, T_SEC_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_tso_vlan_ol3ol4csum, 6, T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_tso_vlan_ol3ol4csum_l3l4csum, 6, \
+ T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_tso_noff, 6, T_SEC_F | TSO_F | NOFF_F) \
+ T(sec_tso_noff_l3l4csum, 6, T_SEC_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+ T(sec_tso_noff_ol3ol4csum, 6, T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+ T(sec_tso_noff_ol3ol4csum_l3l4csum, 6, \
+ T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_tso_noff_vlan, 6, T_SEC_F | TSO_F | NOFF_F | VLAN_F) \
+ T(sec_tso_noff_vlan_l3l4csum, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_tso_noff_vlan_ol3ol4csum, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_tso_noff_vlan_ol3ol4csum_l3l4csum, 6, \
+ T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_96_111 \
+ T(sec_ts, 8, T_SEC_F | TSP_F) \
+ T(sec_ts_l3l4csum, 8, T_SEC_F | TSP_F | L3L4CSUM_F) \
+ T(sec_ts_ol3ol4csum, 8, T_SEC_F | TSP_F | OL3OL4CSUM_F) \
+ T(sec_ts_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_ts_vlan, 8, T_SEC_F | TSP_F | VLAN_F) \
+ T(sec_ts_vlan_l3l4csum, 8, T_SEC_F | TSP_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_ts_vlan_ol3ol4csum, 8, T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_ts_vlan_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_ts_noff, 8, T_SEC_F | TSP_F | NOFF_F) \
+ T(sec_ts_noff_l3l4csum, 8, T_SEC_F | TSP_F | NOFF_F | L3L4CSUM_F) \
+ T(sec_ts_noff_ol3ol4csum, 8, T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \
+ T(sec_ts_noff_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_ts_noff_vlan, 8, T_SEC_F | TSP_F | NOFF_F | VLAN_F) \
+ T(sec_ts_noff_vlan_l3l4csum, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_ts_noff_vlan_ol3ol4csum, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_ts_noff_vlan_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES_112_127 \
+ T(sec_ts_tso, 8, T_SEC_F | TSP_F | TSO_F) \
+ T(sec_ts_tso_l3l4csum, 8, T_SEC_F | TSP_F | TSO_F | L3L4CSUM_F) \
+ T(sec_ts_tso_ol3ol4csum, 8, T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F) \
+ T(sec_ts_tso_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_ts_tso_vlan, 8, T_SEC_F | TSP_F | TSO_F | VLAN_F) \
+ T(sec_ts_tso_vlan_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_ts_tso_vlan_ol3ol4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_ts_tso_vlan_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_ts_tso_noff, 8, T_SEC_F | TSP_F | TSO_F | NOFF_F) \
+ T(sec_ts_tso_noff_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+ T(sec_ts_tso_noff_ol3ol4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+ T(sec_ts_tso_noff_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ T(sec_ts_tso_noff_vlan, 8, T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F) \
+ T(sec_ts_tso_noff_vlan_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+ T(sec_ts_tso_noff_vlan_ol3ol4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+ T(sec_ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 8, \
+ T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F)
+
+#define NIX_TX_FASTPATH_MODES \
+ NIX_TX_FASTPATH_MODES_0_15 \
+ NIX_TX_FASTPATH_MODES_16_31 \
+ NIX_TX_FASTPATH_MODES_32_47 \
+ NIX_TX_FASTPATH_MODES_48_63 \
+ NIX_TX_FASTPATH_MODES_64_79 \
+ NIX_TX_FASTPATH_MODES_80_95 \
+ NIX_TX_FASTPATH_MODES_96_111 \
+ NIX_TX_FASTPATH_MODES_112_127
+
+#define T(name, sz, flags) \
uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
- \
uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_mseg_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
- \
uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_vec_##name( \
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
+ uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_vec_mseg_##name( \
void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
NIX_TX_FASTPATH_MODES
#undef T
+#define NIX_TX_XMIT(fn, sz, flags) \
+ uint16_t __rte_noinline __rte_hot fn( \
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts) \
+ { \
+ uint64_t cmd[sz]; \
+ /* For TSO, inner checksum is a must */ \
+ if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \
+ !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \
+ return 0; \
+ return cn10k_nix_xmit_pkts(tx_queue, NULL, tx_pkts, pkts, cmd, \
+ flags); \
+ }
+
+#define NIX_TX_XMIT_MSEG(fn, sz, flags) \
+ uint16_t __rte_noinline __rte_hot fn( \
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts) \
+ { \
+ uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; \
+ /* For TSO, inner checksum is a must */ \
+ if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \
+ !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \
+ return 0; \
+ return cn10k_nix_xmit_pkts_mseg(tx_queue, NULL, tx_pkts, pkts, \
+ cmd, (flags) | NIX_TX_MULTI_SEG_F); \
+ }
+
+#define NIX_TX_XMIT_VEC(fn, sz, flags) \
+ uint16_t __rte_noinline __rte_hot fn( \
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts) \
+ { \
+ uint64_t cmd[sz]; \
+ /* For TSO, inner checksum is a must */ \
+ if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \
+ !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \
+ return 0; \
+ return cn10k_nix_xmit_pkts_vector(tx_queue, NULL, tx_pkts, \
+ pkts, cmd, (flags)); \
+ }
+
+#define NIX_TX_XMIT_VEC_MSEG(fn, sz, flags) \
+ uint16_t __rte_noinline __rte_hot fn( \
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts) \
+ { \
+ uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; \
+ /* For TSO, inner checksum is a must */ \
+ if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \
+ !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \
+ return 0; \
+ return cn10k_nix_xmit_pkts_vector( \
+ tx_queue, NULL, tx_pkts, pkts, cmd, \
+ (flags) | NIX_TX_MULTI_SEG_F); \
+ }
+
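+/* Illustrative sketch (not part of this patch): a source file would emit the
+ * actual functions by expanding a mode group through the helpers above, e.g.:
+ *
+ *	#define T(name, sz, flags) \
+ *		NIX_TX_XMIT(cn10k_nix_xmit_pkts_##name, sz, flags)
+ *	NIX_TX_FASTPATH_MODES_64_79
+ *	#undef T
+ */
+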
#endif /* __CN10K_TX_H__ */