- POWER9 and ARMv8 with ConnectX-4 Lx, ConnectX-5, ConnectX-6, ConnectX-6 Dx,
ConnectX-6 Lx, BlueField and BlueField-2.
-- ``rxq_cqe_pad_en`` parameter [int]
-
- A nonzero value enables 128B padding of the CQE on the Rx side. The CQE
- size is aligned to the cacheline size of the core. If the cacheline size
- is 128B, the CQE size is configured to be 128B even though the device
- writes only 64B of data on the cacheline. This avoids unnecessary cache
- invalidation from the device's two consecutive writes onto one cacheline.
- On some architectures, however, it is more beneficial to update the
- entire cacheline by padding the remaining 64B rather than striding,
- because the resulting read-modify-write can degrade performance
- significantly. On the other hand, writing the extra data consumes more
- PCIe bandwidth and can also reduce the maximum throughput. It is
- recommended to set this parameter empirically. Disabled by default.
-
- Supported on:
-
- - CPU having 128B cacheline with ConnectX-5 and BlueField.
-
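For context: this patch removes the ``rxq_cqe_pad_en`` device argument and
instead enables CQE padding automatically whenever it can help. A minimal
sketch of that decision, assuming DPDK's ``RTE_CACHE_LINE_SIZE`` constant and
the rdma-core capability flag (the helper name is hypothetical)::

    #include <stdbool.h>
    #include <rte_common.h>          /* RTE_CACHE_LINE_SIZE */
    #include <infiniband/mlx5dv.h>   /* MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD */

    /* Pad CQEs only when a 64B device write would otherwise leave half
     * of a 128B core cacheline stale, and the device supports padding. */
    static inline bool
    mlx5_cqe_pad_wanted(const struct mlx5dv_context *dv_attr)
    {
        return RTE_CACHE_LINE_SIZE == 128 &&
               (dv_attr->flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
    }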
- ``rxq_pkt_pad_en`` parameter [int]
A nonzero value enables padding Rx packet to the size of cacheline on PCI
unsigned int hw_padding = 0;
unsigned int mps;
unsigned int cqe_comp;
- unsigned int cqe_pad = 0;
unsigned int tunnel_en = 0;
unsigned int mpls_en = 0;
unsigned int swp = 0;
else
cqe_comp = 1;
config->cqe_comp = cqe_comp;
-#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- /* Whether device supports 128B Rx CQE padding. */
- cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
- (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
-#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
tunnel_en = ((dv_attr.tunnel_offloads_caps &
DRV_LOG(WARNING, "Rx CQE compression isn't supported");
config->cqe_comp = 0;
}
- if (config->cqe_pad && !cqe_pad) {
- DRV_LOG(WARNING, "Rx CQE padding isn't supported");
- config->cqe_pad = 0;
- } else if (config->cqe_pad) {
- DRV_LOG(INFO, "Rx CQE padding is enabled");
- }
if (config->devx) {
err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
if (err) {
dev->data->port_id);
}
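The block removed above obtained the padding capability from the extended
device attributes at probe time. A sketch of such a capability query, using
rdma-core's ``mlx5dv_query_device()`` (a standalone helper for illustration,
not the driver's exact code)::

    #include <stdbool.h>
    #include <infiniband/mlx5dv.h>

    /* Query the mlx5 extended attributes and test the 128B CQE padding
     * capability bit; treat a failed query as "not supported". */
    static bool
    query_cqe_pad_cap(struct ibv_context *ctx)
    {
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };

        if (mlx5dv_query_device(ctx, &dv_attr))
            return false;
        return !!(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
    }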
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- if (priv->config.cqe_pad) {
+ if (RTE_CACHE_LINE_SIZE == 128) {
cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
}
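With the devarg gone, the remaining condition keys solely off the build-time
cacheline size. A sketch of how this flag feeds CQ creation through
rdma-core's ``mlx5dv_create_cq()`` (helper name illustrative; error handling
and the ``HAVE_IBV_MLX5_MOD_CQE_128B_PAD`` guard elided)::

    #include <rte_common.h>
    #include <infiniband/mlx5dv.h>

    /* Request 128B-padded CQEs at CQ creation when the core cacheline
     * is 128B wide. */
    static struct ibv_cq_ex *
    create_rx_cq(struct ibv_context *ctx, struct ibv_cq_init_attr_ex *attr)
    {
        struct mlx5dv_cq_init_attr mlx5_attr = { .comp_mask = 0 };

        if (RTE_CACHE_LINE_SIZE == 128) {
            mlx5_attr.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
            mlx5_attr.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
        }
        return mlx5dv_create_cq(ctx, attr, &mlx5_attr);
    }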
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
-/* Device parameter to enable RX completion entry padding to 128B. */
-#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"
-
/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"
}
config->cqe_comp = !!tmp;
config->cqe_comp_fmt = tmp;
- } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
- config->cqe_pad = !!tmp;
} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
config->hw_padding = !!tmp;
} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
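The branch removed here was one case in the driver's per-key device-argument
handler. For reference, the general DPDK pattern for parsing such
"key=value" arguments with ``rte_kvargs`` looks roughly like this (the
handler and wrapper below are illustrative, not the mlx5 code)::

    #include <stdlib.h>
    #include <rte_kvargs.h>

    /* Interpret any nonzero value as "enabled", as the mlx5 devargs do. */
    static int
    handle_bool_arg(const char *key, const char *val, void *opaque)
    {
        int *out = opaque;

        (void)key;
        *out = !!atoi(val);
        return 0;
    }

    static int
    parse_pkt_pad(const char *devargs, int *pkt_pad_en)
    {
        const char *const keys[] = { MLX5_RXQ_PKT_PAD_EN, NULL };
        struct rte_kvargs *kvlist = rte_kvargs_parse(devargs, keys);

        if (kvlist == NULL)
            return -1;
        rte_kvargs_process(kvlist, MLX5_RXQ_PKT_PAD_EN,
                           handle_bool_arg, pkt_pad_en);
        rte_kvargs_free(kvlist);
        return 0;
    }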
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
- MLX5_RXQ_CQE_PAD_EN,
MLX5_RXQ_PKT_PAD_EN,
MLX5_RX_MPRQ_EN,
MLX5_RX_MPRQ_LOG_STRIDE_NUM,
unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int cqe_comp_fmt:3; /* CQE compression format. */
- unsigned int cqe_pad:1; /* CQE padding is enabled. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int mr_ext_memseg_en:1;
struct mlx5_priv *priv = NULL;
int err = 0;
unsigned int cqe_comp;
- unsigned int cqe_pad = 0;
struct rte_ether_addr mac;
char name[RTE_ETH_NAME_MAX_LEN];
int own_domain_id = 0;
DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
config->cqe_comp = 0;
}
- if (config->cqe_pad && !cqe_pad) {
- DRV_LOG(WARNING, "Rx CQE padding isn't supported.");
- config->cqe_pad = 0;
- } else if (config->cqe_pad) {
- DRV_LOG(INFO, "Rx CQE padding is enabled.");
- }
if (config->devx) {
err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
if (err) {