return -rte_errno;
}
+/**
+ * Create the dummy QP with minimal resources for loopback.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct ibv_context *ctx = sh->ctx;
+ struct mlx5dv_qp_init_attr qp_init_attr = {0};
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq_attr = {{0}};
+
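+ /* The dummy queue is only needed when loopback mode is requested for this port. */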
+ if (dev->data->dev_conf.lpbk_mode) {
+ /* Allow packets sent from the NIC to loop back without source MAC check. */
+ qp_init_attr.comp_mask |=
+ MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+ qp_init_attr.create_flags |=
+ MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
+ } else {
+ return 0;
+ }
+ /* Checking the refcnt is sufficient, it is zeroed right after "sh" is allocated. */
+ if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
+ priv->lb_used = 1;
+ return 0;
+ }
+ cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = 1,
+ .channel = NULL,
+ .comp_mask = 0,
+ };
+ cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+ .comp_mask = 0,
+ };
+ /* Only CQ is needed, no WQ(RQ) is required in this case. */
+ sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
+ &cq_attr.ibv,
+ &cq_attr.mlx5));
+ if (!sh->self_lb.ibv_cq) {
+ DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
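+ /*
+ * A minimal raw packet QP: send and receive share the single dummy
+ * CQ created above, with only one receive WR provisioned. The self
+ * loopback capability comes from the TIR_ALLOW_SELF_LOOPBACK_UC
+ * create flag set in qp_init_attr above.
+ */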
+ sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask = IBV_QP_INIT_ATTR_PD,
+ .pd = sh->pd,
+ .send_cq = sh->self_lb.ibv_cq,
+ .recv_cq = sh->self_lb.ibv_cq,
+ .cap.max_recv_wr = 1,
+ },
+ &qp_init_attr);
+ if (!sh->self_lb.qp) {
+ DRV_LOG(ERR, "Port %u cannot allocate QP for loopback.",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ priv->lb_used = 1;
+ return 0;
+error:
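+ /* Undo the reference taken above and release the CQ if it was already created. */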
+ if (sh->self_lb.ibv_cq) {
+ claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+ sh->self_lb.ibv_cq = NULL;
+ }
+ (void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ return -rte_errno;
+#else
+ RTE_SET_USED(dev);
+ return 0;
+#endif
+}
+
+/**
+ * Release the dummy queue resources for loopback.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
+{
+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+
+ if (!priv->lb_used)
+ return;
+ MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
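+ /* Destroy the shared QP and CQ only when the last reference is dropped. */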
+ if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (sh->self_lb.qp) {
+ claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
+ sh->self_lb.qp = NULL;
+ }
+ if (sh->self_lb.ibv_cq) {
+ claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
+ sh->self_lb.ibv_cq = NULL;
+ }
+ }
+ priv->lb_used = 0;
+#else
+ RTE_SET_USED(dev);
+ return;
+#endif
+}
+
/**
* Release a Tx verbs queue object.
*
.txq_obj_new = mlx5_txq_ibv_obj_new,
.txq_obj_modify = mlx5_ibv_modify_qp,
.txq_obj_release = mlx5_txq_ibv_obj_release,
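+ /* No dummy loopback queue is needed for Verbs Rx objects. */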
+ .lb_dummy_queue_create = NULL,
+ .lb_dummy_queue_release = NULL,
};
struct mlx5_rxq_obj *rxq; /* Rx queue object. */
};
+/* Loopback dummy queue resources required due to Verbs API. */
+struct mlx5_lb_ctx {
+ struct ibv_qp *qp; /* QP object. */
+ void *ibv_cq; /* Completion queue. */
+ uint16_t refcnt; /* Reference count for representors. */
+};
+
#define MLX5_COUNTERS_PER_POOL 512
#define MLX5_MAX_PENDING_QUERIES 4
#define MLX5_CNT_CONTAINER_RESIZE 64
/* Meter management structure. */
struct mlx5_aso_ct_pools_mng *ct_mng;
/* Management data for ASO connection tracking. */
+ struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for DevX. */
struct mlx5_dev_shared_port port[]; /* per device port data array. */
};
int (*txq_obj_modify)(struct mlx5_txq_obj *obj,
enum mlx5_txq_modify_type type, uint8_t dev_port);
void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
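+ /* Optional hooks to create/release the dummy loopback queue. */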
+ int (*lb_dummy_queue_create)(struct rte_eth_dev *dev);
+ void (*lb_dummy_queue_release)(struct rte_eth_dev *dev);
};
#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
unsigned int sampler_en:1; /* Whether support sampler. */
unsigned int mtr_en:1; /* Whether support meter. */
unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
+ unsigned int lb_used:1; /* Loopback queue is referenced by this port. */
uint16_t domain_id; /* Switch domain identifier. */
uint16_t vport_id; /* Associated VF vport index (if any). */
uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
dev->data->port_id, strerror(rte_errno));
goto error;
}
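+ /*
+ * The dummy loopback queue is created only when DevX flow rules
+ * with TIR destinations are in use and the queue object backend
+ * provides the hook (the Verbs backend leaves it NULL).
+ */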
+ if ((priv->config.devx && priv->config.dv_flow_en &&
+ priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+ ret = priv->obj_ops.lb_dummy_queue_create(dev);
+ if (ret)
+ goto error;
+ }
ret = mlx5_txq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
mlx5_traffic_disable(dev);
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
+ if (priv->obj_ops.lb_dummy_queue_release)
+ priv->obj_ops.lb_dummy_queue_release(dev);
mlx5_txpp_stop(dev); /* Stop last. */
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
+ if (priv->obj_ops.lb_dummy_queue_release)
+ priv->obj_ops.lb_dummy_queue_release(dev);
mlx5_txpp_stop(dev);
return 0;