txq->qconf.nb_desc = nb_desc;
memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
+ txq->lso_tun_fmt = dev->lso_tun_fmt;
otx2_nix_form_default_desc(txq);
otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
struct otx2_mbox *mbox = dev->mbox;
struct nix_lso_format_cfg_rsp *rsp;
struct nix_lso_format_cfg *req;
- uint8_t base;
+ uint8_t *fmt;
int rc;
/* Skip if TSO was not requested */
if (rc)
return rc;
- base = rsp->lso_format_idx;
- if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+ if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV4)
return -EFAULT;
- dev->lso_base_idx = base;
- otx2_nix_dbg("tcpv4 lso fmt=%u", base);
+ otx2_nix_dbg("tcpv4 lso fmt=%u", rsp->lso_format_idx);
/*
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 1)
+ if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV6)
return -EFAULT;
- otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+ otx2_nix_dbg("tcpv6 lso fmt=%u\n", rsp->lso_format_idx);
/*
* IPv4/UDP/TUN HDR/IPv4/TCP LSO
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 2)
- return -EFAULT;
- otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+ dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
+ otx2_nix_dbg("udp tun v4v4 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv4/UDP/TUN HDR/IPv6/TCP LSO
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 3)
- return -EFAULT;
- otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+ dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
+ otx2_nix_dbg("udp tun v4v6 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv6/UDP/TUN HDR/IPv4/TCP LSO
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 4)
- return -EFAULT;
- otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+ dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
+ otx2_nix_dbg("udp tun v6v4 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv6/UDP/TUN HDR/IPv6/TCP LSO
rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 5)
- return -EFAULT;
- otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
+
+ dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
+ otx2_nix_dbg("udp tun v6v6 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv4/TUN HDR/IPv4/TCP LSO
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 6)
- return -EFAULT;
- otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+ dev->lso_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
+ otx2_nix_dbg("tun v4v4 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv4/TUN HDR/IPv6/TCP LSO
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 7)
- return -EFAULT;
- otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+ dev->lso_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
+ otx2_nix_dbg("tun v4v6 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv6/TUN HDR/IPv4/TCP LSO
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 8)
- return -EFAULT;
- otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+ dev->lso_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
+ otx2_nix_dbg("tun v6v4 fmt=%u\n", rsp->lso_format_idx);
/*
* IPv6/TUN HDR/IPv6/TCP LSO
rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (rc)
return rc;
- if (rsp->lso_format_idx != base + 9)
- return -EFAULT;
- otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
+
+ dev->lso_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
+ otx2_nix_dbg("tun v6v6 fmt=%u\n", rsp->lso_format_idx);
+
+ /* Save all tunnel formats in a u64 for the fast path.
+ * The lower 32 bits hold the non-UDP tunnel formats and
+ * the upper 32 bits hold the UDP tunnel formats.
+ */
+ fmt = dev->lso_tun_idx;
+ dev->lso_tun_fmt = ((uint64_t)fmt[NIX_LSO_TUN_V4V4] |
+ (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 8 |
+ (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 16 |
+ (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 24);
+
+ fmt = dev->lso_udp_tun_idx;
+ dev->lso_tun_fmt |= ((uint64_t)fmt[NIX_LSO_TUN_V4V4] << 32 |
+ (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 40 |
+ (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 48 |
+ (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 56);
+
return 0;
}
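To make the packing concrete, here is a minimal, self-contained sketch of the same byte-per-format layout. The index values 2..9 are made up for illustration; the driver stores whatever the nix_lso_format_cfg mailbox replies return:

#include <stdint.h>
#include <stdio.h>

enum { TUN_V4V4, TUN_V4V6, TUN_V6V4, TUN_V6V6 };

int main(void)
{
	const uint8_t tun_idx[4]     = { 2, 3, 4, 5 }; /* non-UDP tunnel */
	const uint8_t udp_tun_idx[4] = { 6, 7, 8, 9 }; /* UDP tunnel */
	uint64_t fmt = 0;
	int i;

	/* One byte per format: bytes 0-3 non-UDP, bytes 4-7 UDP */
	for (i = 0; i < 4; i++) {
		fmt |= (uint64_t)tun_idx[i] << (i * 8);
		fmt |= (uint64_t)udp_tun_idx[i] << (32 + i * 8);
	}

	/* shift = 32 (UDP) + 16 (outer v6) + 8 (inner v6) lands on
	 * udp_tun_idx[TUN_V6V6], i.e. 9
	 */
	printf("udp tun v6v6 fmt=%u\n", (uint8_t)(fmt >> 56));
	return 0;
}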
struct otx2_eth_txq *txq = tx_queue; uint16_t i;
const rte_iova_t io_addr = txq->io_addr;
void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt = 0;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Perform header writes before barrier for TSO */
if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
for (i = 0; i < pkts; i++)
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
rte_io_wmb();
for (i = 0; i < pkts; i++) {
- otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
/* Passing number of segdw as 4: HDR + EXT + SG + SMEM */
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, 4, flags);
struct otx2_eth_txq *txq = tx_queue; uint64_t i;
const rte_iova_t io_addr = txq->io_addr;
void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt = 0;
uint16_t segdw;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Perform header writes before barrier for TSO */
if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
for (i = 0; i < pkts; i++)
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
rte_io_wmb();
for (i = 0; i < pkts; i++) {
- otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, segdw,
}
static __rte_always_inline void
-otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
+otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
+ const uint64_t lso_tun_fmt)
{
struct nix_send_ext_s *send_hdr_ext;
struct nix_send_hdr_s *send_hdr;
(ol_flags & PKT_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;
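+ /* Each format index occupies one byte of lso_tun_fmt:
+ * UDP tunnel formats live in the upper 32 bits, an outer
+ * IPv6 header skips two one-byte entries (+16) and an
+ * inner IPv6 header skips one (+8).
+ */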
+ uint8_t shift = is_udp_tun ? 32 : 0;
+
+ shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
+ shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
/* Update format for UDP tunneled packet */
- send_hdr_ext->w0.lso_format += is_udp_tun ? 2 : 6;
-
- send_hdr_ext->w0.lso_format +=
- !!(ol_flags & PKT_TX_OUTER_IPV6) << 1;
+ send_hdr_ext->w0.lso_format = (lso_tun_fmt >> shift);
}
}
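Read as a whole, the shift arithmetic indexes the packed table built in nix_lso_fmt_setup(). A standalone sketch using the same made-up 2..9 indices, with stand-in flag bits and an assumed narrow lso_format bitfield (the real field width comes from the NIX send descriptor definition):

#include <stdint.h>
#include <stdio.h>

#define OUTER_IPV6 (1u << 0) /* stand-in for PKT_TX_OUTER_IPV6 */
#define INNER_IPV6 (1u << 1) /* stand-in for PKT_TX_IPV6 */

struct send_ext_w0 {
	uint64_t lso_format : 5; /* bitfield keeps only the low bits */
};

static uint8_t pick_fmt(uint64_t lso_tun_fmt, uint32_t flags, int is_udp_tun)
{
	uint8_t shift = is_udp_tun ? 32 : 0;     /* UDP half of table */

	shift += (!!(flags & OUTER_IPV6) << 4);  /* +16: skip two entries */
	shift += (!!(flags & INNER_IPV6) << 3);  /* +8: skip one entry */

	struct send_ext_w0 w0 = { .lso_format = lso_tun_fmt >> shift };
	return (uint8_t)w0.lso_format;
}

int main(void)
{
	/* Table packed as in nix_lso_fmt_setup(), made-up indices 2..9 */
	const uint64_t fmt = 0x0908070605040302ULL;

	printf("udp tun v4v6 fmt=%u\n", pick_fmt(fmt, INNER_IPV6, 1)); /* 7 */
	printf("tun v6v4 fmt=%u\n", pick_fmt(fmt, OUTER_IPV6, 0));     /* 4 */
	return 0;
}

Because the bitfield assignment discards everything above the format field, a plain right shift is enough to select the entry; no masking is needed on the fast path.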