With the upstream rdma-core, Rx CQE compression has to be enabled
through the Direct Verbs call mlx5dv_create_cq() instead of the
regular Verbs call (ibv_create_cq()). If the CQE size is 128 bytes,
compression is supported only by certain devices, so whether to
enable it has to be decided by checking the capability bits.
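As a rough sketch (not part of this patch), creating a CQ with
compressed CQEs against upstream rdma-core could look like the
following; ctx, cqe_n and cqe_128b are placeholder names for the
device context, CQE count and CQE-size selection the driver already
tracks, and MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP is assumed to be
provided by rdma-core (or by the fallback definition added below):

#include <stddef.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static struct ibv_cq *
create_rx_cq(struct ibv_context *ctx, int cqe_n, int cqe_128b)
{
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct ibv_cq_init_attr_ex cq_attr = {
		.cqe = cqe_n,
		.channel = NULL,
		.comp_mask = 0,
	};
	struct mlx5dv_cq_init_attr dv_cq_attr = { .comp_mask = 0 };
	int cqe_comp = 1;

	/* 128-byte CQE compression is device dependent: check the
	 * capability bit before requesting it.
	 */
	if (cqe_128b &&
	    (mlx5dv_query_device(ctx, &dv_attr) ||
	     !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)))
		cqe_comp = 0;
	if (cqe_comp) {
		dv_cq_attr.comp_mask |=
			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		dv_cq_attr.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
	}
	/* Direct Verbs replaces the plain ibv_create_cq() call. */
	return ibv_cq_ex_to_cq(mlx5dv_create_cq(ctx, &cq_attr, &dv_cq_attr));
}

Checking the capability flag before setting
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE avoids requesting compression
that the device would reject at CQ creation time.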
Fixes: 43e9d9794cde ("net/mlx5: support upstream rdma-core")
Cc: stable@dpdk.org
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
infiniband/mlx5dv.h \
enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
+ $(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_ETHTOOL_LINK_MODE_25G \
/usr/include/linux/ethtool.h \
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif
+#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
+#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
+#endif
+
struct mlx5_args {
int cqe_comp;
int txq_inline;
struct ibv_device_attr_ex device_attr;
unsigned int sriov;
unsigned int mps;
+ unsigned int cqe_comp;
unsigned int tunnel_en = 0;
int idx;
int i;
INFO("MPW is disabled\n");
mps = MLX5_MPW_DISABLED;
}
+ if (RTE_CACHE_LINE_SIZE == 128 &&
+ !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+ cqe_comp = 0;
+ else
+ cqe_comp = 1;
if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
goto error;
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
priv->pd = pd;
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
- priv->cqe_comp = 1; /* Enable compression by default. */
+ priv->cqe_comp = cqe_comp;
priv->tunnel_en = tunnel_en;
/* Enable vector by default if supported. */
priv->tx_vec_en = 1;
priv->txq_inline = MLX5_WQE_SIZE_MAX -
MLX5_WQE_SIZE;
}
+ if (priv->cqe_comp && !cqe_comp) {
+ WARN("Rx CQE compression isn't supported");
+ priv->cqe_comp = 0;
+ }
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
union {
- struct ibv_cq_init_attr_ex cq;
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq;
struct ibv_wq_init_attr wq;
struct ibv_cq_ex cq_attr;
} attr;
goto error;
}
}
- attr.cq = (struct ibv_cq_init_attr_ex){
+ attr.cq.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = cqe_n,
+ .channel = tmpl->channel,
+ .comp_mask = 0,
+ };
+ attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
if (priv->cqe_comp) {
- attr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS;
- attr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ attr.cq.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
if (rxq_check_vec_support(rxq_data) < 0)
cqe_n *= 2;
}
- tmpl->cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl->channel, 0);
+ tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
+ &attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
goto error;