struct otx2_eth_txq *txq = tx_queue; uint16_t i;
const rte_iova_t io_addr = txq->io_addr;
void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Perform header writes before barrier for TSO */
if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
for (i = 0; i < pkts; i++)
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
- /* Lets commit any changes in the packet */
- rte_io_wmb();
+ /* Commit any changes to the packet here, as no further changes
+ * will be made to it unless no-fast-free (MBUF_NOFF) is enabled,
+ * in which case the barrier is deferred until after mbuf updates.
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
for (i = 0; i < pkts; i++) {
- otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
/* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, 4, flags);
struct otx2_eth_txq *txq = tx_queue; uint64_t i;
const rte_iova_t io_addr = txq->io_addr;
void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt;
uint16_t segdw;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Perform header writes before barrier for TSO */
if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
for (i = 0; i < pkts; i++)
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
- /* Lets commit any changes in the packet */
- rte_io_wmb();
+ /* Commit any changes to the packet here, as no further changes
+ * will be made to it unless no-fast-free (MBUF_NOFF) is enabled,
+ * in which case the barrier is deferred until after mbuf updates.
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
for (i = 0; i < pkts; i++) {
- otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, segdw,
/* Reduce the cached count */
txq->fc_cache_pkts -= pkts;
- /* Lets commit any changes in the packet */
- rte_io_wmb();
+ /* Commit any changes to the packet here, as no further changes
+ * will be made to it unless no-fast-free (MBUF_NOFF) is enabled,
+ * in which case the barrier is deferred until after mbuf updates.
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
senddesc23_w0 = senddesc01_w0;
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask01, 0);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask01, 1);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask23, 0);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
if (otx2_nix_prefree_seg(mbuf))
vsetq_lane_u64(0x80000, xmask23, 1);
else
- __mempool_check_cookies(mbuf->pool,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool,
(void **)&mbuf,
1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+ /* Ensure the mbuf fields updated by
+ * otx2_nix_prefree_seg() are written before the LMTST.
+ */
+ rte_io_wmb();
} else {
struct rte_mbuf *mbuf;
/* Mark mempool object as "put" since
*/
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
offsetof(struct rte_mbuf, buf_iova));
- __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
1, 0);
RTE_SET_USED(mbuf);
}
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
- 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
- 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
- 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
- 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
- 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
+ 0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
+ 0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
+ 0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
};
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
- 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
- 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
- 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
- 0x03, /* PKT_TX_IP_CKSUM */
- 0x13, /* PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
+ 0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
+ 0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
+ 0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
+ 0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x02, /* PKT_TX_IPV4 */
- 0x12, /* PKT_TX_IPV4 |
- * PKT_TX_TCP_CKSUM
+ 0x02, /* RTE_MBUF_F_TX_IPV4 */
+ 0x12, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x22, /* PKT_TX_IPV4 |
- * PKT_TX_SCTP_CKSUM
+ 0x22, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x32, /* PKT_TX_IPV4 |
- * PKT_TX_UDP_CKSUM
+ 0x32, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
- 0x03, /* PKT_TX_IPV4 |
- * PKT_TX_IP_CKSUM
+ 0x03, /* RTE_MBUF_F_TX_IPV4 |
+ * RTE_MBUF_F_TX_IP_CKSUM
*/
- 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_TCP_CKSUM
+ 0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_TCP_CKSUM
*/
- 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_SCTP_CKSUM
+ 0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_SCTP_CKSUM
*/
- 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
- * PKT_TX_UDP_CKSUM
+ 0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
+ * RTE_MBUF_F_TX_UDP_CKSUM
*/
},
else
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
- if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
rte_mb();