#include <fcntl.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#endif
}
+static void
+mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ void *ctx = priv->sh->ctx;
+
+ priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
+ if (!priv->q_counters) {
+ struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
+ struct ibv_wq *wq;
+
+ DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
+ "by DevX - fall-back to use the kernel driver global "
+ "queue counter.", dev->data->port_id);
+ /* Create a WQ via the kernel driver and query its queue counter ID. */
+ if (cq) {
+ wq = mlx5_glue->create_wq(ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->sh->pd,
+ .cq = cq,
+ });
+ if (wq) {
+ /* The counter is assigned only in the RDY state. */
+ int ret = mlx5_glue->modify_wq(wq,
+ &(struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ });
+
+ if (ret == 0)
+ mlx5_devx_cmd_wq_query(wq,
+ &priv->counter_set_id);
+ claim_zero(mlx5_glue->destroy_wq(wq));
+ }
+ claim_zero(mlx5_glue->destroy_cq(cq));
+ }
+ } else {
+ priv->counter_set_id = priv->q_counters->id;
+ }
+ if (priv->counter_set_id == 0)
+ DRV_LOG(INFO, "Part of the port %d statistics will not be "
+ "available.", dev->data->port_id);
+}
+
/**
* Spawn an Ethernet device from Verbs information.
*
int err = 0;
unsigned int hw_padding = 0;
unsigned int mps;
- unsigned int cqe_comp;
- unsigned int cqe_pad = 0;
unsigned int tunnel_en = 0;
unsigned int mpls_en = 0;
unsigned int swp = 0;
strerror(rte_errno));
return NULL;
}
+ if (eth_da.type == RTE_ETH_REPRESENTOR_NONE) {
+ /* Representor not specified. */
+ rte_errno = EBUSY;
+ return NULL;
+ }
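+ /* Only VF representors are supported here. */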
+ if (eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+ rte_errno = ENOTSUP;
+ DRV_LOG(ERR, "unsupported representor type: %s",
+ dpdk_dev->devargs->args);
+ return NULL;
+ }
for (i = 0; i < eth_da.nb_representor_ports; ++i)
if (eth_da.representor_ports[i] ==
(uint16_t)switch_info->port_name)
mprq_caps.max_single_wqe_log_num_of_strides;
}
#endif
- if (RTE_CACHE_LINE_SIZE == 128 &&
- !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
- cqe_comp = 0;
- else
- cqe_comp = 1;
- config->cqe_comp = cqe_comp;
-#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- /* Whether device supports 128B Rx CQE padding. */
- cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
- (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
-#endif
+ /* Rx CQE compression is enabled by default. */
+ config->cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
tunnel_en = ((dv_attr.tunnel_offloads_caps &
priv->dev_port = spawn->phys_port;
priv->pci_dev = spawn->pci_dev;
priv->mtu = RTE_ETHER_MTU;
- priv->mp_id.port_id = port_id;
- strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
/* Some internal functions rely on Netlink sockets, open them now. */
priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
config->mps == MLX5_MPW ? "legacy " : "",
config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- if (config->cqe_comp && !cqe_comp) {
- DRV_LOG(WARNING, "Rx CQE compression isn't supported");
- config->cqe_comp = 0;
- }
- if (config->cqe_pad && !cqe_pad) {
- DRV_LOG(WARNING, "Rx CQE padding isn't supported");
- config->cqe_pad = 0;
- } else if (config->cqe_pad) {
- DRV_LOG(INFO, "Rx CQE padding is enabled");
- }
if (config->devx) {
err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
if (err) {
sh->cmng.relaxed_ordering_read = 0;
sh->cmng.relaxed_ordering_write = 0;
}
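+ /* Store the HCA timestamp format capabilities in the shared context. */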
+ sh->rq_ts_format = config->hca_attr.rq_ts_format;
+ sh->sq_ts_format = config->hca_attr.sq_ts_format;
+ sh->qp_ts_format = config->hca_attr.qp_ts_format;
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
}
#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
if (config->hca_attr.qos.sup &&
- config->hca_attr.qos.srtcm_sup &&
+ config->hca_attr.qos.flow_meter_old &&
config->dv_flow_en) {
uint8_t reg_c_mask =
config->hca_attr.qos.flow_meter_reg_c_ids;
- 1 + REG_C_0;
priv->mtr_en = 1;
priv->mtr_reg_share =
- config->hca_attr.qos.flow_meter_reg_share;
+ config->hca_attr.qos.flow_meter;
DRV_LOG(DEBUG, "The REG_C meter uses is %d",
priv->mtr_color_reg);
}
if (config->hca_attr.log_max_ft_sampler_num > 0 &&
config->dv_flow_en) {
priv->sampler_en = 1;
- DRV_LOG(DEBUG, "The Sampler enabled!\n");
+ DRV_LOG(DEBUG, "Sampler enabled!");
} else {
priv->sampler_en = 0;
if (!config->hca_attr.log_max_ft_sampler_num)
- DRV_LOG(WARNING, "No available register for"
- " Sampler.");
+ DRV_LOG(WARNING,
+ "No available register for sampler.");
else
- DRV_LOG(DEBUG, "DV flow is not supported!\n");
+ DRV_LOG(DEBUG, "DV flow is not supported!");
}
#endif
}
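+ /* Disable Rx CQE compression if the device lacks the required support. */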
+ if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
+ !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+ (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
+ DRV_LOG(WARNING, "Flow Tag CQE compression"
+ " format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+ (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
+ DRV_LOG(WARNING, "L3/L4 Header CQE compression"
+ " format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
+ config->cqe_comp ? "" : "not ");
if (config->tx_pp) {
DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
config->hca_attr.dev_freq_khz);
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = priv->representor_id;
}
+ priv->mp_id.port_id = eth_dev->data->port_id;
+ strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
/*
* Store associated network device interface index. This index
* is permanent throughout the lifetime of device. So, we may store
mac.addr_bytes[4], mac.addr_bytes[5]);
#ifdef RTE_LIBRTE_MLX5_DEBUG
{
- char ifname[IF_NAMESIZE];
+ char ifname[MLX5_NAMESIZE];
if (mlx5_get_ifname(eth_dev, &ifname) == 0)
DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
/* Use specific wrappers for Tx object. */
priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
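+ /* Prepare the queue counter used for the "out_of_buffer" statistic. */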
+ mlx5_queue_counter_id_prepare(eth_dev);
} else {
priv->obj_ops = ibv_obj_ops;
int fd;
if (priv->sh) {
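+ /* Read "out_of_buffer" via the DevX queue counter when available. */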
+ if (priv->q_counters != NULL &&
+ strcmp(ctr_name, "out_of_buffer") == 0)
+ return mlx5_devx_cmd_queue_counter_query(priv->q_counters,
+ 0, (uint32_t *)stat);
MKSTR(path, "%s/ports/%d/hw_counters/%s",
priv->sh->ibdev_path,
priv->dev_port,