#include "mlx5.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10
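+/*
+ * Resolve the owning mlx5_priv, rte_eth_dev and port ID of a Rx queue
+ * control structure through the first entry on its owner list.
+ */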
+#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
+#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
+#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))
+
+/* The first entry is kept NULL as a comparison sentinel, so it is
+ * excluded from the usable length.
+ */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint32_t cqe_cnt; /* Number of CQEs. */
};
-/* Multi-Packet RQ buffer header. */
-struct mlx5_mprq_buf {
- struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
- uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
- struct rte_mbuf_ext_shared_info shinfos[];
- /*
- * Shared information per stride.
- * More memory will be allocated for the first stride head-room and for
- * the strides data.
- */
-} __rte_cache_aligned;
-
/* Get pointer to the first stride. */
#define mlx5_mprq_buf_addr(ptr, strd_n) (RTE_PTR_ADD((ptr), \
sizeof(struct mlx5_mprq_buf) + \
unsigned int lro:1; /* Enable LRO. */
unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
unsigned int mcqe_format:3; /* CQE compression format. */
+ unsigned int shared:1; /* Shared RXQ. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
- struct mlx5_priv *priv; /* Back pointer to private data. */
+ struct mlx5_dev_ctx_shared *sh; /* Shared context. */
enum mlx5_rxq_type type; /* Rxq type. */
unsigned int socket; /* CPU socket ID for allocations. */
+ LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
+ uint32_t share_group; /* Group ID of shared RXQ. */
+ uint16_t share_qid; /* Shared RxQ ID in group. */
+ unsigned int started:1; /* Whether (shared) RXQ has been started. */
unsigned int irq:1; /* Whether IRQ is enabled. */
uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
uint32_t wqn; /* WQ number. */
uint16_t dump_file_n; /* Number of dump files. */
+};
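+
+/*
+ * A matching sketch (hypothetical helper, assuming only the fields above):
+ * a shared RXQ control structure is identified by its share group ID and
+ * its queue ID within the group.
+ */
+static inline int
+rxq_ctrl_share_match(struct mlx5_rxq_ctrl *ctrl, uint32_t group,
+		     uint16_t qid)
+{
+	return ctrl->rxq.shared && ctrl->share_group == group &&
+	       ctrl->share_qid == qid;
+}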
+
+/* RX queue private data. */
+struct mlx5_rxq_priv {
+ uint16_t idx; /* Queue index. */
+ uint32_t refcnt; /* Reference counter. */
+ struct mlx5_rxq_ctrl *ctrl; /* Control structure, possibly shared among ports. */
+ LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
+ struct mlx5_devx_rq devx_rq; /* DevX RQ object. */
struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
uint32_t hairpin_status; /* Hairpin binding status. */
};
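
/*
 * A minimal sketch of the new layering (hypothetical helper): each port
 * owns a mlx5_rxq_priv, which points at a mlx5_rxq_ctrl possibly shared
 * with other ports, while the datapath only touches the embedded
 * mlx5_rxq_data.
 */
static inline struct mlx5_rxq_data *
rxq_priv_to_data(struct mlx5_rxq_priv *rxq)
{
	return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}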
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
-struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
+ struct mlx5_rxq_priv *rxq,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg,
uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
- (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
-struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
+uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
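
/*
 * A usage sketch (hypothetical caller; the error code is illustrative):
 * mlx5_rxq_ref() takes a reference on the per-port queue for the duration
 * of an operation, and mlx5_rxq_deref() drops it.
 */
static inline int
rxq_with_ref(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, idx);

	if (rxq == NULL)
		return -EINVAL;
	/* ... operate on rxq and rxq->ctrl ... */
	mlx5_rxq_deref(dev, idx);
	return 0;
}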
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
bool standalone);
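+/* Presumably paired around port restart: (re)attach and detach an
+ * indirection table to/from its Rx queues.
+ */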
+int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl);
+int mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl);
struct mlx5_list_entry *mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx);
int mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
-void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
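+/*
+ * The Rx queue handle is now passed directly instead of a (device, queue
+ * ID) pair, matching the updated eth_rx_queue_count_t driver callback
+ * signature.
+ */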
-uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint32_t mlx5_rx_queue_count(void *rx_queue);
void mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
int mlx5_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
*/
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
- return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->share_cache,
+ return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
mr_ctrl, mp, addr);
}
shinfo = &buf->shinfos[strd_idx];
rte_mbuf_ext_refcnt_set(shinfo, 1);
/*
- * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
+ * RTE_MBUF_F_EXTERNAL will be set in pkt->ol_flags when
* attaching the stride to the mbuf, and more offload flags
* will be added below by calling rxq_cq_to_mbuf().
* Other fields will be overwritten.
buf_len, shinfo);
/* Set mbuf head-room. */
SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
- MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
DATA_LEN(pkt) = len;
return 0;
/* All the configured queues should be enabled. */
for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL ||
+ rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
n_ibv++;
- if (mlx5_rxq_mprq_enabled(rxq))
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
++n;
}
/* Multi-Packet RQ can't be partially configured. */
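	/*
	 * A sketch of what presumably follows this hunk (an assumption
	 * based on the comment above): either every standard Rx queue
	 * has MPRQ enabled or none has, e.g.:
	 *
	 *	MLX5_ASSERT(n == 0 || n == n_ibv);
	 *	return n == n_ibv;
	 */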