#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_io.h>
+#include <rte_bus_pci.h>
#include "mlx5_utils.h"
#include "mlx5.h"
#define MLX5_FLOW_TUNNEL 5
struct mlx5_rxq_stats {
- unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
struct mlx5_txq_stats {
- unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
uint64_t oerrors; /**< Total number of failed transmitted packets. */
};
-struct priv;
+struct mlx5_priv;
/* Compressed CQE context. */
struct rxq_zip {
uint16_t ai; /* Array index. */
uint16_t ca; /* Current array index. */
uint16_t na; /* Next array index. */
uint32_t cq_ci; /* The next CQE. */
uint32_t cqe_cnt; /* Number of CQEs. */
};

/* RX queue descriptor. */
struct mlx5_rxq_data {
uint16_t consumed_strd; /* Number of consumed strides in WQE. */
uint32_t rq_pi;
uint32_t cq_ci;
+ uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
volatile void *wqes;
struct rte_mempool *mp;
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+ uint16_t idx; /* Queue index. */
struct mlx5_rxq_stats stats;
uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
} __rte_cache_aligned;
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
+ struct mlx5_rxq_data rxq; /* Data path structure. */
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
- struct priv *priv; /* Back pointer to private data. */
- struct mlx5_rxq_data rxq; /* Data path structure. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
- uint16_t idx; /* Queue index. */
uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
};

/* Hash Rx queue. */
struct mlx5_hrxq {
LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ void *action; /* DV QP action pointer. */
+#endif
uint64_t hash_fields; /* Verbs Hash fields. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};

/* TX queue descriptor. */
__extension__
struct mlx5_txq_data {
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
- volatile void *bf_reg; /* Blueflame register remapped. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
+ uint16_t port_id; /* Port ID of device. */
+ uint16_t idx; /* Queue index. */
struct mlx5_txq_stats stats; /* TX queue counters. */
#ifndef RTE_ARCH_64
rte_spinlock_t *uar_lock;
/* UAR access lock required for 32bit implementations */
#endif
} __rte_cache_aligned;
/* TX queue control descriptor. */
struct mlx5_txq_ctrl {
+ struct mlx5_txq_data txq; /* Data path structure. */
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
- struct priv *priv; /* Back pointer to private data. */
- struct mlx5_txq_data txq; /* Data path structure. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
- volatile void *bf_reg_orig; /* Blueflame register from verbs. */
- uint16_t idx; /* Queue index. */
+ void *bf_reg; /* BlueFlame register from Verbs. */
};
+#define MLX5_TX_BFREG(txq) \
+ (MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])
+
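/*
 * Usage sketch, illustrative only: mlx5_tx_doorbell_sketch() is a
 * hypothetical helper, not part of this header. With the UAR table kept in
 * per-process private data, MLX5_TX_BFREG() resolves a process-local
 * BlueFlame mapping from the port ID and queue index stored in the queue
 * data, which is how mlx5_tx_dbrec_cond_wmb() further below now finds the
 * register.
 */
static __rte_always_inline void
mlx5_tx_doorbell_sketch(struct mlx5_txq_data *txq,
			volatile struct mlx5_wqe *wqe)
{
	uint64_t *dst = MLX5_TX_BFREG(txq); /* Per-process BF register. */
	volatile uint64_t *src = (volatile uint64_t *)wqe;

	rte_cio_wmb(); /* Make the WQE visible to HW before the doorbell. */
	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Doorbell record. */
	rte_wmb(); /* Order the record before the BF register write. */
	*dst = *src; /* Ring: copy the first 8 bytes of the WQE. */
}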
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
/* mlx5_txq.c */

int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
-int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
/* mlx5_mr.c */

uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
struct rte_mempool *mp);
+int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
+ size_t len);
+int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
+ size_t len);
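/*
 * Wiring sketch, assumption for illustration: the two prototypes above match
 * the pci_dma_map_t/pci_dma_unmap_t callback types of rte_bus_pci.h (hence
 * the new include), so the PCI driver definition can hook them to serve
 * rte_dev_dma_map()/rte_dev_dma_unmap() requests for externally allocated
 * memory. The driver object below is abridged and hypothetical.
 */
static struct rte_pci_driver mlx5_pci_driver_sketch = {
	.driver = {
		.name = "net_mlx5_sketch", /* Hypothetical driver name. */
	},
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	/* .id_table, .probe, .remove and .drv_flags omitted here. */
};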
/**
* Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
- sizeof(*err_cqe));
+ sizeof(*cqe)); /* Dump the whole CQE, not only the smaller error-CQE layout. */
}
return 1;
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
}
#endif /* NDEBUG */
++cq_ci;
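+ /*
+  * Prevent loads of CQE fields (e.g. wqe_counter below) from being
+  * reordered before the CQE validity checks on weakly ordered CPUs.
+  */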
+ rte_cio_rmb();
txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
ctrl = (volatile struct mlx5_wqe_ctrl *)
tx_mlx5_wqe(txq, txq->wqe_pi);
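/**
 * Get Memory Pool (MP) from an mbuf. If the mbuf is a clone, the pool of the
 * direct mbuf it is attached to is returned instead; RTE_MBUF_CLONED()
 * supersedes the deprecated RTE_MBUF_INDIRECT() test.
 *
 * @param buf
 *   Pointer to the mbuf.
 *
 * @return
 *   Memory pool where the mbuf data is located.
 */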
static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ if (unlikely(RTE_MBUF_CLONED(buf)))
return rte_mbuf_from_indirect(buf)->pool;
return buf->pool;
}
mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
int cond)
{
- uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+ uint64_t *dst = MLX5_TX_BFREG(txq);
volatile uint64_t *src = ((volatile uint64_t *)wqe);
rte_cio_wmb();
* Pointer to the Tx queue.
* @param buf
* Pointer to the mbuf.
- * @param tso
- * TSO offloads enabled.
- * @param vlan
- * VLAN offloads enabled
* @param offsets
* Pointer to the SWP header offsets.
* @param swp_types
* in if any of SWP offsets is set. Therefore, all of the L3 offsets
* should be set regardless of HW offload.
*/
- off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+ off = buf->outer_l2_len + (vlan ? sizeof(struct rte_vlan_hdr) : 0);
offsets[1] = off >> 1; /* Outer L3 offset, in 2-byte units (hence >> 1). */
off += buf->outer_l3_len;
if (tunnel == PKT_TX_TUNNEL_UDP)