#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_io.h>
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
+/* Support tunnel matching. */
+#define MLX5_FLOW_TUNNEL 5
+
struct mlx5_rxq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
- uint16_t rq_ci;
- uint16_t strd_ci; /* Stride index in a WQE for Multi-Packet RQ. */
- uint16_t rq_pi;
- uint16_t cq_ci;
+ uint32_t rq_ci;
+ uint16_t consumed_strd; /* Number of consumed strides in WQE. */
+ uint32_t rq_pi;
+ uint32_t cq_ci;
struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
volatile void *wqes;
void *cq_uar; /* CQ user access region. */
uint32_t cqn; /* CQ number. */
uint8_t cq_arm_sn; /* CQ arm seq number. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock_cq;
+ /* CQ (UAR) access lock required for 32bit implementations. */
+#endif
uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
struct priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
- uint32_t tunnel_types[16]; /* Tunnel type counter. */
unsigned int irq:1; /* Whether IRQ is enabled. */
uint16_t idx; /* Queue index. */
+ uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
+ uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
};
/* Indirection table. */
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
- uint32_t tunnel; /* Tunnel type. */
- uint32_t rss_level; /* RSS on tunnel level. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
volatile void *bf_reg; /* Blueflame register remapped. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock;
+ /* UAR access lock required for 32bit implementations. */
+#endif
} __rte_cache_aligned;
/* Verbs Rx queue elements. */
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
-extern const size_t rss_hash_default_key_len;
int mlx5_check_mprq_support(struct rte_eth_dev *dev);
int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- uint32_t tunnel, uint32_t rss_level);
+ int tunnel __rte_unused);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n,
- uint32_t tunnel, uint32_t rss_level);
+ const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
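/*
 * Hedged usage sketch (illustration only, not part of the patch): a flow with a
 * DROP action can be pointed at the singleton drop hash Rx queue declared
 * above.  The error-handling convention and the use of hrxq->qp as the flow
 * destination are assumptions made for this example.
 */
static inline struct mlx5_hrxq *
example_attach_drop_hrxq(struct rte_eth_dev *dev)
{
	struct mlx5_hrxq *hrxq = mlx5_hrxq_drop_new(dev);

	if (hrxq == NULL)
		return NULL; /* rte_errno is expected to be set by the callee. */
	/* ... hrxq->qp would be used as the destination QP of the flow ... */
	/* When the flow is destroyed: mlx5_hrxq_drop_release(dev); */
	return hrxq;
}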
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp);
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures.
+ *
+ * @param val
+ * Value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+ rte_spinlock_t *lock __rte_unused)
+{
+#ifdef RTE_ARCH_64
+ rte_write64_relaxed(val, addr);
+#else /* !RTE_ARCH_64 */
+ rte_spinlock_lock(lock);
+ rte_write32_relaxed(val, addr);
+ rte_io_wmb();
+ rte_write32_relaxed(val >> 32,
+ (volatile void *)((volatile char *)addr + 4));
+ rte_spinlock_unlock(lock);
+#endif
+}
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures while guaranteeing the order of execution with the
+ * code being executed.
+ *
+ * @param val
+ * Value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+{
+ rte_io_wmb();
+ __mlx5_uar_write64_relaxed(val, addr, lock);
+}
+
+/* Assist macros, used instead of directly calling the functions they wrap. */
+#ifdef RTE_ARCH_64
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, NULL)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
+#else
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, lock)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
+#endif
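/*
 * Hedged usage sketch (illustration only, not part of the patch): the assist
 * macros let callers pass the per-queue lock unconditionally.  On 64-bit
 * builds the macro drops the 'lock' argument before it is ever expanded, so
 * referencing txq->uar_lock compiles even though that field only exists for
 * 32-bit builds.  The function name and the WQE pointer are assumptions.
 */
static __rte_always_inline void
example_bf_copy(struct mlx5_txq_data *txq, volatile uint64_t *src)
{
	/* Copy the first 8 bytes of a WQE to the BlueFlame register.  On
	 * 32-bit targets the helper splits this into two 32-bit stores
	 * under txq->uar_lock; on 64-bit it is a single relaxed store. */
	mlx5_uar_write64_relaxed(*src, txq->bf_reg, txq->uar_lock);
}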
#ifndef NDEBUG
/**
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1;
int ret = 1;
unsigned int i;
*txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
+/**
+ * Get Memory Pool (MP) from mbuf. If the mbuf is indirect, the mempool of the
+ * direct mbuf it was cloned from is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
/**
* Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
* as mempool is pre-configured and static.
return mlx5_tx_addr2mr_bh(txq, addr);
}
-#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
+
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ if (rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+ }
+ return lkey;
+}
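/*
 * Hedged usage sketch (illustration only, not part of the patch): filling a
 * WQE data segment with the lkey resolved by mlx5_tx_mb2mr().  The
 * mlx5_wqe_data_seg layout is the one from mlx5dv.h/mlx5_prm.h, and the
 * byte-order handling of the returned lkey is an assumption.
 */
static __rte_always_inline int
example_fill_dseg(struct mlx5_txq_data *txq, struct rte_mbuf *mb,
		  struct mlx5_wqe_data_seg *dseg)
{
	uint32_t lkey = mlx5_tx_mb2mr(txq, mb);

	if (unlikely(lkey == UINT32_MAX))
		return -1; /* No MR covers the buffer; caller drops the packet. */
	dseg->byte_count = rte_cpu_to_be_32(rte_pktmbuf_data_len(mb));
	dseg->lkey = lkey; /* Assumed already big-endian from the MR cache. */
	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(mb, uintptr_t));
	return 0;
}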
/**
* Ring TX queue doorbell and flush the update if requested.
*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
- *dst = *src;
+ mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
if (cond)
rte_wmb();
}
*/
static __rte_always_inline void
txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
- uint8_t tso, uint64_t vlan,
- uint8_t *offsets, uint8_t *swp_types)
+ uint8_t *offsets, uint8_t *swp_types)
{
- uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+ const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
const uint64_t inner_ip =
buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
uint16_t idx;
uint16_t off;
- if (likely(!tunnel || !txq->swp_en ||
- (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
+ if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
+ tunnel != PKT_TX_TUNNEL_IP)))
return;
/*
* The index should have:
* in if any of SWP offsets is set. Therefore, all of the L3 offsets
* should be set regardless of HW offload.
*/
- off = buf->outer_l2_len + (vlan ? 4 : 0);
+ off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
offsets[1] = off >> 1; /* Outer L3 offset. */
- if (tunnel == PKT_TX_TUNNEL_UDP) {
- off += buf->outer_l3_len;
+ off += buf->outer_l3_len;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
offsets[0] = off >> 1; /* Outer L4 offset. */
- }
if (inner_ip) {
off += buf->l2_len;
offsets[3] = off >> 1; /* Inner L3 offset. */