#include <rte_common.h>
#include <rte_spinlock.h>
+#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include "mlx5.h"
int32_t ts_offset; /* Timestamp field dynamic offset. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
struct mlx5_txq_stats stats; /* TX queue counters. */
-#ifndef RTE_ARCH_64
- rte_spinlock_t *uar_lock;
- /* UAR access lock required for 32bit implementations */
-#endif
+ struct mlx5_uar_data uar_data;
struct rte_mbuf *elts[0];
/* Storage for queued packets, must be the last field. */
} __rte_cache_aligned;
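/*
 * A minimal sketch (assumption, not part of this patch) of the
 * struct mlx5_uar_data provided by mlx5_common.h that replaces the
 * removed per-queue uar_lock: the mapped doorbell address plus, on
 * 32-bit builds only, the lock that previously lived in the Tx queue.
 * Field names are illustrative; the real definition may differ.
 */
struct mlx5_uar_data {
	uint64_t *db;         /* Mapped doorbell/BlueFlame register address. */
#ifndef RTE_ARCH_64
	rte_spinlock_t *sl_p; /* Assumed lock for non-atomic 64-bit writes. */
#endif
};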
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl, void *bf_reg);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
/* mlx5_tx.c */
-uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n);
void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
unsigned int olx __rte_unused);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
MLX5_TXOFF_PRE_DECL(mc_mpw);
MLX5_TXOFF_PRE_DECL(i_mpw);
-static __rte_always_inline uint64_t *
+static __rte_always_inline struct mlx5_uar_data *
mlx5_tx_bfreg(struct mlx5_txq_data *txq)
{
- return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
-}
-
-/**
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures.
- *
- * @param val
- * value to write in CPU endian format.
- * @param addr
- * Address to write to.
- * @param lock
- * Address of the lock to use for that UAR access.
- */
-static __rte_always_inline void
-__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
- rte_spinlock_t *lock __rte_unused)
-{
-#ifdef RTE_ARCH_64
- *(uint64_t *)addr = val;
-#else /* !RTE_ARCH_64 */
- rte_spinlock_lock(lock);
- *(uint32_t *)addr = val;
- rte_io_wmb();
- *((uint32_t *)addr + 1) = val >> 32;
- rte_spinlock_unlock(lock);
-#endif
-}
-
-/**
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures while guaranteeing the order of execution with the
- * code being executed.
- *
- * @param val
- * value to write in CPU endian format.
- * @param addr
- * Address to write to.
- * @param lock
- * Address of the lock to use for that UAR access.
- */
-static __rte_always_inline void
-__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
-{
- rte_io_wmb();
- __mlx5_uar_write64_relaxed(val, addr, lock);
-}
-
-/* Assist macros, used instead of directly calling the functions they wrap. */
-#ifdef RTE_ARCH_64
-#define mlx5_uar_write64_relaxed(val, dst, lock) \
- __mlx5_uar_write64_relaxed(val, dst, NULL)
-#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
-#else
-#define mlx5_uar_write64_relaxed(val, dst, lock) \
- __mlx5_uar_write64_relaxed(val, dst, lock)
-#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
-#endif
-
-/**
- * Ring TX queue doorbell and flush the update if requested.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe
- * Pointer to the last WQE posted in the NIC.
- * @param cond
- * Request for write memory barrier after BlueFlame update.
- */
-static __rte_always_inline void
-mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
- int cond)
-{
- uint64_t *dst = mlx5_tx_bfreg(txq);
- volatile uint64_t *src = ((volatile uint64_t *)wqe);
-
- rte_io_wmb();
- *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
- /* Ensure ordering between DB record and BF copy. */
- rte_wmb();
- mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
- if (cond)
- rte_wmb();
+ return &MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
}
/**
static __rte_always_inline void
mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
- mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
+ mlx5_doorbell_ring(mlx5_tx_bfreg(txq), *(volatile uint64_t *)wqe,
+ txq->wqe_ci, txq->qp_db, 1);
}
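/*
 * A sketch of what the common mlx5_doorbell_ring() helper is assumed to
 * do, reconstructed from the removed mlx5_tx_dbrec_cond_wmb() and
 * __mlx5_uar_write64_relaxed() above. The real helper is declared in
 * mlx5_common.h; only the argument order is confirmed by the call sites
 * in this patch, the body below is an assumption.
 */
static __rte_always_inline void
mlx5_doorbell_ring_sketch(struct mlx5_uar_data *uar, uint64_t val,
			  uint32_t index, volatile uint32_t *db, int cond)
{
	/* Make sure all WQE writes are visible before the DB record update. */
	rte_io_wmb();
	*db = rte_cpu_to_be_32(index);
	/* Ensure ordering between DB record and BlueFlame copy. */
	rte_wmb();
#ifdef RTE_ARCH_64
	/* 64-bit stores to the UAR are atomic, no lock needed. */
	*(volatile uint64_t *)uar->db = val;
#else
	/* 32-bit build: two 32-bit stores under the assumed UAR lock. */
	rte_spinlock_lock(uar->sl_p);
	*(volatile uint32_t *)uar->db = (uint32_t)val;
	rte_io_wmb();
	*((volatile uint32_t *)uar->db + 1) = (uint32_t)(val >> 32);
	rte_spinlock_unlock(uar->sl_p);
#endif
	/* Optional flush, requested by the caller (cf. db_nc/db_heu above). */
	if (cond)
		rte_wmb();
}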
/**
uint8_t *psrc;
MLX5_ASSERT(len);
- MLX5_ASSERT(must <= len);
do {
/* Allow zero length packets; this case must be checked first. */
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
/*
* Copy only the minimal required
- * part of the data buffer.
+ * part of the data buffer. Limit the
+ * amount of data to be copied to the
+ * length of the available space.
*/
- len = diff;
+ len = RTE_MIN(len, diff);
}
}
continue;
inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
inlen > (dlen + vlan)))
return MLX5_TXCMP_CODE_ERROR;
- MLX5_ASSERT(inlen >= txq->inlen_mode);
/*
* Check whether there are enough free WQEBBs:
* - Control Segment
MLX5_ASSERT(txq->inlen_mode >=
MLX5_ESEG_MIN_INLINE_SIZE);
MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
- inlen = txq->inlen_mode;
+ inlen = RTE_MIN(txq->inlen_mode, inlen);
} else if (vlan && !txq->vlan_en) {
/*
* VLAN insertion is requested and hardware does not
} else {
goto do_first;
}
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+ goto do_build;
/*
* Now we know the minimal amount of data is requested
* to inline. Check whether we should inline the buffers
mbuf = NEXT(mbuf);
/* This must not be the end of the packet. */
MLX5_ASSERT(mbuf);
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+ break;
nxlen = inlen + rte_pktmbuf_data_len(mbuf);
} while (unlikely(nxlen < txq->inlen_send));
}
* Estimate the number of Data Segments conservatively,
* supposing no mbufs are being freed during inlining.
*/
+do_build:
MLX5_ASSERT(inlen <= txq->inlen_send);
ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
MLX5_ESEG_MIN_INLINE_SIZE +
* packets are coming and the write barrier will be issued on
* the next burst (after descriptor writing, at least).
*/
- mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
- (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
+ mlx5_doorbell_ring(mlx5_tx_bfreg(txq),
+ *(volatile uint64_t *)loc.wqe_last, txq->wqe_ci,
+ txq->qp_db, !txq->db_nc &&
+ (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
/* Not all of the mbufs may be stored into elts yet. */
part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {