#include <stddef.h>
#include <unistd.h>
#include <string.h>
-#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
+#include <mlx5_common.h>
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
+#include "rte_pmd_mlx5.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"
+/*
+ * Device parameter to configure the total data buffer size of a single
+ * hairpin queue (base-2 logarithm of the size).
+ */
+#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"
+
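/*
 * Illustrative sketch, not part of this patch: one way an application
 * could pass the new "hp_buf_log_sz" devarg at probe time through the
 * EAL arguments. The PCI address and the value 16 (a 64 KB hairpin
 * buffer) are placeholders only.
 */
#include <rte_eal.h>
#include <rte_common.h>

static int
example_probe_with_hp_buf_log_sz(void)
{
	char *argv[] = {
		"app",
		"-w", "0000:03:00.0,hp_buf_log_sz=16",
	};

	return rte_eal_init((int)RTE_DIM(argv), argv);
}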
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
if (pool->curr == pool->last) {
size = pool->curr - pool->free_arr;
size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
- assert(size2 > size);
+ MLX5_ASSERT(size2 > size);
mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
claim_zero
(mlx5_glue->destroy_flow_action
(pool->counters_raw[j].action));
- if (!batch && pool->counters_raw[j].dcs)
+ if (!batch && MLX5_GET_POOL_CNT_EXT
+ (pool, j)->dcs)
claim_zero(mlx5_devx_cmd_destroy
- (pool->counters_raw[j].dcs));
+ (MLX5_GET_POOL_CNT_EXT
+ (pool, j)->dcs));
}
TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
next);
char *env;
int value;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* Get environment variable to store. */
env = getenv(MLX5_SHUT_UP_BF);
value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
static void
mlx5_restore_doorbell_mapping_env(int value)
{
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* Restore the original environment variable state. */
if (value == MLX5_ARG_UNSET)
unsetenv(MLX5_SHUT_UP_BF);
struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif
- assert(spawn);
+ MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
pthread_mutex_lock(&mlx5_ibv_list_mutex);
/* Search for IB context by device name. */
LIST_FOREACH(sh, &mlx5_ibv_list, next) {
}
}
/* No device found, we have to create a new shared context. */
- assert(spawn->max_port);
+ MLX5_ASSERT(spawn->max_port);
sh = rte_zmalloc("ethdev shared ib context",
sizeof(struct mlx5_ibv_shared) +
spawn->max_port *
return sh;
error:
pthread_mutex_unlock(&mlx5_ibv_list_mutex);
- assert(sh);
+ MLX5_ASSERT(sh);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
if (sh->flow_id_pool)
mlx5_flow_id_pool_release(sh->flow_id_pool);
rte_free(sh);
- assert(err > 0);
+ MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
}
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
pthread_mutex_lock(&mlx5_ibv_list_mutex);
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_ibv_shared *lctx;
LIST_FOREACH(lctx, &mlx5_ibv_list, next)
if (lctx == sh)
break;
- assert(lctx);
+ MLX5_ASSERT(lctx);
if (lctx != sh) {
DRV_LOG(ERR, "Freeing non-existing shared IB context");
goto exit;
}
#endif
- assert(sh);
- assert(sh->refcnt);
+ MLX5_ASSERT(sh);
+ MLX5_ASSERT(sh->refcnt);
/* Secondary process should not free the shared context. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (--sh->refcnt)
goto exit;
- /* Release created Memory Regions. */
- mlx5_mr_release(sh);
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
LIST_REMOVE(sh, mem_event_cb);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ /* Release created Memory Regions. */
+ mlx5_mr_release(sh);
/* Remove context from the global device list. */
LIST_REMOVE(sh, next);
/*
* Only the primary process handles async device events.
*/
mlx5_flow_counters_mng_close(sh);
- assert(!sh->intr_cnt);
+ MLX5_ASSERT(!sh->intr_cnt);
if (sh->intr_cnt)
mlx5_intr_callback_unregister
(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
- assert(tbl_data);
+ MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
}
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
- assert(tbl_data);
+ MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
}
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
- assert(tbl_data);
+ MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
}
char s[MLX5_HLIST_NAMESIZE];
int err = 0;
- assert(sh);
+ MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
if (!sh->flow_tbls) {
return;
priv->dr_shared = 0;
sh = priv->sh;
- assert(sh);
+ MLX5_ASSERT(sh);
#ifdef HAVE_MLX5DV_DR
- assert(sh->dv_refcnt);
+ MLX5_ASSERT(sh->dv_refcnt);
if (sh->dv_refcnt && --sh->dv_refcnt)
return;
if (sh->rx_domain) {
socket = ctrl->socket;
}
- assert(data != NULL);
+ MLX5_ASSERT(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
if (!ret && size)
rte_errno = ENOMEM;
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
- assert(data != NULL);
+ MLX5_ASSERT(data != NULL);
rte_free(ptr);
}
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_udp_tunnel *udp_tunnel)
{
- assert(udp_tunnel != NULL);
+ MLX5_ASSERT(udp_tunnel != NULL);
if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
udp_tunnel->udp_port == 4789)
return 0;
/* In case mlx5_dev_stop() has not been called. */
mlx5_dev_interrupt_handler_uninstall(dev);
mlx5_dev_interrupt_handler_devx_uninstall(dev);
+ /*
+ * If the default mreg copy action was already removed at the stop stage,
+ * the search will find nothing and no further action will be taken.
+ */
+ mlx5_flow_stop_default(dev);
mlx5_traffic_disable(dev);
- mlx5_flow_flush(dev, NULL);
+ /*
+ * If all the flows were already flushed at the device stop stage,
+ * this call returns immediately without taking any action.
+ */
+ mlx5_flow_list_flush(dev, &priv->flows, true);
mlx5_flow_meter_flush(dev, NULL);
+ /* Free the intermediate buffers for flow creation. */
+ mlx5_flow_free_intermediate(dev);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
if (priv->config.vf)
- mlx5_nl_mac_addr_flush(dev);
+ mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
+ dev->data->mac_addrs,
+ MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
if (priv->nl_socket_route >= 0)
close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0)
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rxq_info_get = mlx5_rxq_info_get,
+ .txq_info_get = mlx5_txq_info_get,
+ .rx_burst_mode_get = mlx5_rx_burst_mode_get,
+ .tx_burst_mode_get = mlx5_tx_burst_mode_get,
.rx_queue_count = mlx5_rx_queue_count,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
.dev_infos_get = mlx5_dev_infos_get,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rxq_info_get = mlx5_rxq_info_get,
+ .txq_info_get = mlx5_txq_info_get,
+ .rx_burst_mode_get = mlx5_rx_burst_mode_get,
+ .tx_burst_mode_get = mlx5_tx_burst_mode_get,
.get_module_info = mlx5_get_module_info,
.get_module_eeprom = mlx5_get_module_eeprom,
};
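/*
 * Illustrative sketch, not part of this patch: with the rxq_info_get
 * callback registered above, an application can retrieve Rx queue
 * details through the generic ethdev API. The port and queue IDs are
 * placeholders; DRV_LOG is the PMD's logging macro.
 */
#include <rte_ethdev.h>

static void
example_log_rxq_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
		DRV_LOG(DEBUG, "port %u rxq %u has %u descriptors",
			port_id, queue_id, qinfo.nb_desc);
}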
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rxq_info_get = mlx5_rxq_info_get,
+ .txq_info_get = mlx5_txq_info_get,
+ .rx_burst_mode_get = mlx5_rx_burst_mode_get,
+ .tx_burst_mode_get = mlx5_tx_burst_mode_get,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
.is_removed = mlx5_is_removed,
config->max_dump_files_num = tmp;
} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
config->lro.timeout = tmp;
+ } else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
+ DRV_LOG(DEBUG, "class argument is %s.", val);
+ } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
+ config->log_hp_size = tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
MLX5_REPRESENTOR,
MLX5_MAX_DUMP_FILES_NUM,
MLX5_LRO_TIMEOUT_USEC,
+ MLX5_CLASS_ARG_NAME,
+ MLX5_HP_BUF_SIZE,
NULL,
};
struct rte_kvargs *kvlist;
if (mlx5_init_shared_data())
return -rte_errno;
sd = mlx5_shared_data;
- assert(sd);
+ MLX5_ASSERT(sd);
rte_spinlock_lock(&sd->lock);
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
* key is specified in devargs
* - if DevX is enabled the inline mode is queried from the
* device (HCA attributes and NIC vport context if needed).
- * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX
+ * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
* and none (0 bytes) for other NICs
*
* @param spawn
default:
meta = 0;
mark = 0;
- assert(false);
+ MLX5_ASSERT(false);
break;
}
if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
; /* Empty. */
/* Find the first clear bit. */
j = rte_bsf64(~page->dbr_bitmap[i]);
- assert(i < (MLX5_DBR_PER_PAGE / 64));
+ MLX5_ASSERT(i < (MLX5_DBR_PER_PAGE / 64));
page->dbr_bitmap[i] |= (UINT64_C(1) << j);
page->dbr_count++;
*dbr_page = page;
return ret;
}
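/*
 * Minimal illustration, not part of this patch, of the free-slot lookup
 * used by the doorbell allocator above: invert the 64-bit occupancy word
 * and take the index of its lowest set bit. The caller must first make
 * sure the word is not completely full.
 */
static inline unsigned int
example_find_free_slot(uint64_t busy_bitmap)
{
	return rte_bsf64(~busy_bitmap);
}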
+int
+rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
+{
+ static const char *const dynf_names[] = {
+ RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
+ RTE_MBUF_DYNFLAG_METADATA_NAME
+ };
+ unsigned int i;
+
+ if (n < RTE_DIM(dynf_names))
+ return -ENOMEM;
+ for (i = 0; i < RTE_DIM(dynf_names); i++) {
+ if (names[i] == NULL)
+ return -EINVAL;
+ strcpy(names[i], dynf_names[i]);
+ }
+ return RTE_DIM(dynf_names);
+}
+
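/*
 * Illustrative caller sketch, not part of this patch, for
 * rte_pmd_mlx5_get_dyn_flag_names(). The two-entry buffer array mirrors
 * the flag list above; RTE_MBUF_DYN_NAMESIZE is the name-buffer size
 * from rte_mbuf_dyn.h.
 */
static int
example_query_dyn_flag_names(void)
{
	char buf[2][RTE_MBUF_DYN_NAMESIZE];
	char *names[2] = { buf[0], buf[1] };
	int i, n;

	n = rte_pmd_mlx5_get_dyn_flag_names(names, RTE_DIM(names));
	if (n < 0)
		return n;
	for (i = 0; i < n; i++)
		DRV_LOG(DEBUG, "dynamic flag name: %s", names[i]);
	return 0;
}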
/**
* Check sibling device configurations.
*
struct mlx5_dev_config *sh_conf = NULL;
uint16_t port_id;
- assert(sh);
+ MLX5_ASSERT(sh);
/* Nothing to compare for the single/first device. */
if (sh->refcnt == 1)
return 0;
/* Some internal functions rely on Netlink sockets, open them now. */
priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
- priv->nl_sn = 0;
priv->representor = !!switch_info->representor;
priv->master = !!switch_info->master;
priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
* is permanent throughout the lifetime of the device. So, we may store
* the ifindex here and use the cached value later.
*/
- assert(spawn->ifindex);
+ MLX5_ASSERT(spawn->ifindex);
priv->if_index = spawn->ifindex;
eth_dev->data->dev_private = priv;
priv->dev_data = eth_dev->data;
mac.addr_bytes[0], mac.addr_bytes[1],
mac.addr_bytes[2], mac.addr_bytes[3],
mac.addr_bytes[4], mac.addr_bytes[5]);
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX5_DEBUG
{
char ifname[IF_NAMESIZE];
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
if (config.vf && config.vf_nl_en)
- mlx5_nl_mac_addr_sync(eth_dev);
+ mlx5_nl_mac_addr_sync(priv->nl_socket_route,
+ mlx5_ifindex(eth_dev),
+ eth_dev->data->mac_addrs,
+ MLX5_MAX_MAC_ADDRESSES);
TAILQ_INIT(&priv->flows);
TAILQ_INIT(&priv->ctrl_flows);
TAILQ_INIT(&priv->flow_meters);
err = ENOTSUP;
goto error;
}
+ /*
+ * Allocate the intermediate buffer for flow creation, just once.
+ * The allocation must be done before any flow is created.
+ */
+ mlx5_flow_alloc_intermediate(eth_dev);
/* Query availability of metadata reg_c's. */
err = mlx5_flow_discover_mreg_c(eth_dev);
if (err < 0) {
}
if (sh)
mlx5_free_shared_ibctx(sh);
- assert(err > 0);
+ MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
}
if (!file)
return -1;
/* Use safe format to check maximal buffer length. */
- assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
+ MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
char tmp_str[IF_NAMESIZE + 32];
struct rte_pci_addr pci_addr;
struct mlx5_dev_config dev_config;
int ret;
+ if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) {
+ DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
+ " driver.");
+ return 1;
+ }
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
mlx5_pmd_socket_init();
ret = mlx5_init_once();
strerror(rte_errno));
return -rte_errno;
}
- assert(pci_drv == &mlx5_driver);
+ MLX5_ASSERT(pci_drv == &mlx5_driver);
errno = 0;
ibv_list = mlx5_glue->get_device_list(&ret);
if (!ibv_list) {
* it may be an E-Switch master device and representors.
* We have to perform identification through the ports.
*/
- assert(nl_rdma >= 0);
- assert(ns == 0);
- assert(nd == 1);
- assert(np);
+ MLX5_ASSERT(nl_rdma >= 0);
+ MLX5_ASSERT(ns == 0);
+ MLX5_ASSERT(nd == 1);
+ MLX5_ASSERT(np);
for (i = 1; i <= np; ++i) {
list[ns].max_port = np;
list[ns].ibv_port = i;
goto exit;
}
}
- assert(ns);
+ MLX5_ASSERT(ns);
/*
* Sort list to probe devices in natural order for users' convenience
* (i.e. master first, then representors from lowest to highest ID).
},
.dv_esw_en = 1,
.dv_flow_en = 1,
+ .log_hp_size = MLX5_ARG_UNSET,
};
/* Device specific configuration. */
switch (pci_dev->id.device_id) {
close(nl_route);
if (list)
rte_free(list);
- assert(ibv_list);
+ MLX5_ASSERT(ibv_list);
mlx5_glue->free_device_list(ibv_list);
return ret;
}
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
+ },
{
.vendor_id = 0
}