* DF bit = 0 otherwise
*/
send_hdr->w0.df = otx2_nix_prefree_seg(m);
+ /* Ensuring mbuf fields which got updated in
+ * otx2_nix_prefree_seg are written before LMTST.
+ */
+ rte_io_wmb();
}
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
return otx2_lmt_submit(io_addr);
}
+/* Ring the LMTST doorbell at io_addr using the release-ordered submit
+ * variant, so stores issued before this call (send descriptor / mbuf
+ * updates) are ordered before the doorbell write.
+ * NOTE(review): release ordering is inferred from the helper's name
+ * otx2_lmt_submit_release() — confirm its barrier semantics in the
+ * common otx2 LMT helpers.
+ * Returns the LMTST status; callers in this file retry while it is 0.
+ */
+static __rte_always_inline uint64_t
+otx2_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)
+{
+ return otx2_lmt_submit_release(io_addr);
+}
+
static __rte_always_inline uint16_t
otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
*slist = rte_mbuf_data_iova(m);
/* Set invert df if buffer is not to be freed by H/W */
- if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
sg_u |= (otx2_nix_prefree_seg(m) << (i + 55));
+ /* Commit changes to mbuf */
+ rte_io_wmb();
+ }
/* Mark mempool object as "put" since it is freed by NIX */
- if (!(sg_u & (1ULL << (i + 55)))) {
- m->next = NULL;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ /* Debug-only mempool cookie bookkeeping for segments freed by the
+ * NIX H/W.
+ * NOTE(review): this rte_io_wmb() is now compiled only under
+ * RTE_LIBRTE_MEMPOOL_DEBUG — confirm release builds do not need it.
+ * NOTE(review): the removed code also cleared m->next for segments
+ * not freed by H/W; that reset is dropped here — confirm it is
+ * handled elsewhere (e.g. in otx2_nix_prefree_seg).
+ */
+ if (!(sg_u & (1ULL << (i + 55))))
__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
- }
+ rte_io_wmb();
+#endif
slist++;
i++;
nb_segs--;
} while (lmt_status == 0);
}
+/* Submit one prepared multi-segment send command: copy 'segdw' units of
+ * the command from 'cmd' into the LMT line (via otx2_lmt_mov_seg) and
+ * ring the doorbell, retrying until otx2_lmt_submit() reports success
+ * (non-zero status).
+ * The leading rte_io_wmb() orders earlier mbuf/descriptor stores before
+ * the LMT copy and doorbell.
+ * NOTE(review): 'segdw' is presumably the command size in segment
+ * dwords as consumed by otx2_lmt_mov_seg — confirm the unit there.
+ */
+static __rte_always_inline void
+otx2_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
+ rte_iova_t io_addr, uint16_t segdw)
+{
+ uint64_t lmt_status;
+
+ rte_io_wmb();
+ do {
+ otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+ lmt_status = otx2_lmt_submit(io_addr);
+ } while (lmt_status == 0);
+}
+
#define L3L4CSUM_F NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F