return result;
}
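+/* LMT submit with release semantics: LDEORL orders all prior stores (the
+ * LMT line written by otx2_lmt_mov()) before the LMTST is issued, so the
+ * caller does not need an explicit rte_io_wmb() before submitting.
+ */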
+static __rte_always_inline uint64_t
+otx2_lmt_submit_release(rte_iova_t io_address)
+{
+ uint64_t result;
+
+ asm volatile (
+ ".cpu generic+lse\n"
+ "ldeorl xzr,%x[rf],[%[rs]]" :
+ [rf] "=r"(result) : [rs] "r"(io_address));
+ return result;
+}
+
static __rte_always_inline void
otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
#ifndef _OTX2_IO_GENERIC_H_
#define _OTX2_IO_GENERIC_H_
+#include <string.h>
+
#define otx2_load_pair(val0, val1, addr) \
do { \
val0 = rte_read64_relaxed((void *)(addr)); \
return 0;
}
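+/* No-op fallback for builds without LMTST support (see the arm64 header
+ * for the real implementation).
+ */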
+static inline int64_t
+otx2_lmt_submit_release(rte_iova_t io_address)
+{
+ RTE_SET_USED(io_address);
+
+ return 0;
+}
+
static __rte_always_inline void
otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
- RTE_SET_USED(out);
- RTE_SET_USED(in);
- RTE_SET_USED(lmtext);
+ /* Copy four words if lmtext = 0,
+ * six words if lmtext = 1,
+ * eight words if lmtext = 2.
+ */
+ memcpy(out, in, (4 + (2 * lmtext)) * sizeof(uint64_t));
}
static __rte_always_inline void
/* Perform header writes before barrier for TSO */
otx2_nix_xmit_prepare_tso(m, flags);
- rte_io_wmb();
+ /* Let's commit any changes in the packet here in the single seg case,
+ * as no further changes to the mbuf will be done.
+ * For multi seg, all mbufs used are set to NULL in
+ * otx2_nix_prepare_mseg() after preparing the SG list, and those
+ * changes must be committed before LMTST.
+ * Likewise, in the no-fast-free case some mbuf fields are updated in
+ * otx2_nix_prefree_seg().
+ * Hence otx2_nix_xmit_submit_lmt_release()/otx2_nix_xmit_mseg_one_release()
+ * carry the store barrier for multi seg.
+ */
+ if (!(flags & NIX_TX_MULTI_SEG_F) &&
+ !(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
txq = otx2_ssogws_xtract_meta(m, txq_data);
otx2_ssogws_prepare_pkt(txq, m, cmd, flags);
if (!ev->sched_type) {
otx2_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
otx2_ssogws_head_wait(ws);
- if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
+ if (otx2_nix_xmit_submit_lmt_release(txq->io_addr) == 0)
otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
txq->io_addr, segdw);
} else {
- otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr,
- segdw);
+ otx2_nix_xmit_mseg_one_release(cmd, txq->lmt_addr,
+ txq->io_addr, segdw);
}
} else {
/* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
const uint64_t addr = npa_lf_aura_handle_to_base(aura_handle) +
NPA_LF_AURA_OP_FREE0;
+ /* Ensure mbuf init changes are written before the free pointers
+ * are enqueued to the stack.
+ */
+ rte_io_wmb();
for (index = 0; index < n; index++)
otx2_store_pair((uint64_t)obj_table[index], reg, addr);
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
- /* Lets commit any changes in the packet */
- rte_io_wmb();
+ /* Let's commit any changes in the packet here, as no further changes
+ * to the packet will be done unless the no-fast-free offload is
+ * enabled (in that case the barrier is issued after
+ * otx2_nix_prefree_seg() instead).
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
for (i = 0; i < pkts; i++) {
otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
}
- /* Lets commit any changes in the packet */
- rte_io_wmb();
-
for (i = 0; i < pkts; i++) {
otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
+ /* Let's commit any changes in the packet (mbufs may have been
+ * updated in otx2_nix_prepare_mseg() above) before LMTST.
+ */
+ rte_io_wmb();
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
tx_pkts[i]->ol_flags, segdw,
flags);
/* Reduce the cached count */
txq->fc_cache_pkts -= pkts;
- /* Lets commit any changes in the packet */
- rte_io_wmb();
+ /* Let's commit any changes in the packet here, as no further changes
+ * to the packet will be done unless the no-fast-free offload is
+ * enabled (in that case the barrier is issued after
+ * otx2_nix_prefree_seg() instead).
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
senddesc23_w0 = senddesc01_w0;
1, 0);
senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+ /* Ensure the mbuf fields updated in
+ * otx2_nix_prefree_seg() are written before LMTST.
+ */
+ rte_io_wmb();
} else {
struct rte_mbuf *mbuf;
/* Mark mempool object as "put" since
* DF bit = 0 otherwise
*/
send_hdr->w0.df = otx2_nix_prefree_seg(m);
+ /* Ensure the mbuf fields updated in
+ * otx2_nix_prefree_seg() are written before LMTST.
+ */
+ rte_io_wmb();
}
/* Mark mempool object as "put" since it is freed by NIX */
if (!send_hdr->w0.df)
return otx2_lmt_submit(io_addr);
}
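+/* Same as otx2_nix_xmit_submit_lmt() but the LMTST is issued with release
+ * semantics, so pending mbuf/descriptor stores are committed as part of
+ * the submit without a separate rte_io_wmb().
+ */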
+static __rte_always_inline uint64_t
+otx2_nix_xmit_submit_lmt_release(const rte_iova_t io_addr)
+{
+ return otx2_lmt_submit_release(io_addr);
+}
+
static __rte_always_inline uint16_t
otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
{
} while (lmt_status == 0);
}
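+/* Same as otx2_nix_xmit_mseg_one() but with a store barrier before the
+ * LMTST, for callers that skip the per-packet rte_io_wmb().
+ */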
+static __rte_always_inline void
+otx2_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
+ rte_iova_t io_addr, uint16_t segdw)
+{
+ uint64_t lmt_status;
+
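+ /* Commit the mbuf/SG list updates done by the caller before LMTST */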
+ rte_io_wmb();
+ do {
+ otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+ lmt_status = otx2_lmt_submit(io_addr);
+ } while (lmt_status == 0);
+}
+
#define L3L4CSUM_F NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F