struct otx2_eth_txq *txq = tx_queue; uint16_t i;
const rte_iova_t io_addr = txq->io_addr;
void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Perform header writes before barrier for TSO */
if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
for (i = 0; i < pkts; i++)
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
rte_io_wmb();
for (i = 0; i < pkts; i++) {
- otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
/* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, 4, flags);
struct otx2_eth_txq *txq = tx_queue; uint64_t i;
const rte_iova_t io_addr = txq->io_addr;
void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt;
uint16_t segdw;
NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Perform header writes before barrier for TSO */
if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
for (i = 0; i < pkts; i++)
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
+ /* Commit any changes made to the packet here, as no further
+ * changes to the packet will be done unless no-fast-free
+ * (MBUF_NOFF) is enabled.
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
+
for (i = 0; i < pkts; i++) {
- otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
- /* Lets commit any changes in the packet */
- rte_io_wmb();
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, segdw,
flags);