#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"
-#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
-
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+/* rte_flow indexed pool configuration. */
+static struct mlx5_indexed_pool_config icfg[] = {
+ {
+ .size = sizeof(struct rte_flow),
+ .trunk_size = 64,
+ .need_lock = 1,
+ .release_mem_en = 0,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .per_core_cache = 0,
+ .type = "ctl_flow_ipool",
+ },
+ {
+ .size = sizeof(struct rte_flow),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 1,
+ .release_mem_en = 0,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .per_core_cache = 1 << 14,
+ .type = "rte_flow_ipool",
+ },
+ {
+ .size = sizeof(struct rte_flow),
+ .trunk_size = 64,
+ .grow_trunk = 3,
+ .grow_shift = 2,
+ .need_lock = 1,
+ .release_mem_en = 0,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .per_core_cache = 0,
+ .type = "mcp_flow_ipool",
+ },
+};
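+
+/*
+ * Illustrative usage sketch, not part of the driver logic. An indexed
+ * pool hands back a compact uint32_t index instead of a pointer, so a
+ * flow handle stays small; "type" below is a placeholder index:
+ *
+ *   uint32_t idx;
+ *   struct rte_flow *flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
+ *   struct rte_flow *same = mlx5_ipool_get(priv->flows[type], idx);
+ *   mlx5_ipool_free(priv->flows[type], idx);
+ *
+ * Only "rte_flow_ipool" enables a per-lcore cache (1 << 14 entries);
+ * the control and MCP pools leave per_core_cache at 0.
+ */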
+
/**
* Initialize shared data between primary and secondary process.
*
/**
* Get mlx5 device attributes.
*
- * @param ctx
- * Pointer to device context.
+ * @param cdev
+ * Pointer to the mlx5 common device.
*
* @param device_attr
* Pointer to mlx5 device attributes.
*
* @return
- * 0 on success, non zero error number otherwise
+ * 0 on success, a non-zero error number otherwise.
*/
int
-mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
+ struct mlx5_dev_attr *device_attr)
{
struct mlx5_context *mlx5_ctx;
- struct mlx5_hca_attr hca_attr;
void *pv_iseg = NULL;
u32 cb_iseg = 0;
int err = 0;
- if (!ctx)
+ if (!cdev || !cdev->ctx)
return -EINVAL;
- mlx5_ctx = (struct mlx5_context *)ctx;
+ mlx5_ctx = (struct mlx5_context *)cdev->ctx;
memset(device_attr, 0, sizeof(*device_attr));
- err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
- if (err) {
- DRV_LOG(ERR, "Failed to get device hca_cap");
- return err;
- }
- device_attr->max_cq = 1 << hca_attr.log_max_cq;
- device_attr->max_qp = 1 << hca_attr.log_max_qp;
- device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
- device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
- device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
- device_attr->max_pd = 1 << hca_attr.log_max_pd;
- device_attr->max_srq = 1 << hca_attr.log_max_srq;
- device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
- if (hca_attr.rss_ind_tbl_cap) {
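+ /*
+ * HCA capability limits are log2 encoded; shifting converts them to
+ * absolute counts, e.g. max_qp = 1 << log_max_qp.
+ */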
+ device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
+ device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
+ device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
+ device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
+ device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
+ device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
+ device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
+ device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
+ device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap;
+ if (cdev->config.hca_attr.rss_ind_tbl_cap) {
device_attr->max_rwq_indirection_table_size =
- 1 << hca_attr.rss_ind_tbl_cap;
+ 1 << cdev->config.hca_attr.rss_ind_tbl_cap;
}
+ device_attr->sw_parsing_offloads =
+ mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr);
+ device_attr->tunnel_offloads_caps =
+ mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr);
pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
if (pv_iseg == NULL) {
DRV_LOG(ERR, "Failed to get device hca_iseg");
return -ENOTSUP;
}
-/**
- * Function API open device under Windows
- *
- * This function calls the Windows glue APIs to open a device.
- *
- * @param[in] spawn
- * Pointer to the device attributes (name, port, etc).
- * @param[out] config
- * Pointer to device configuration structure.
- * @param[out] sh
- * Pointer to shared context structure.
- *
- * @return
- * 0 on success, a positive error value otherwise.
- */
-int
-mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
- const struct mlx5_dev_config *config,
- struct mlx5_dev_ctx_shared *sh)
-{
- RTE_SET_USED(config);
- int err = 0;
- struct mlx5_context *mlx5_ctx;
-
- pthread_mutex_init(&sh->txpp.mutex, NULL);
- /* Set numa node from pci probe */
- sh->numa_node = spawn->pci_dev->device.numa_node;
-
- /* Try to open device with DevX */
- rte_errno = 0;
- sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
- if (!sh->ctx) {
- DRV_LOG(ERR, "open_device failed");
- err = errno;
- return err;
- }
- sh->devx = 1;
- mlx5_ctx = (struct mlx5_context *)sh->ctx;
- err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev);
- if (err)
- DRV_LOG(ERR, "Failed to query device context fields.");
- return err;
-}
-
/**
* DV flow counter mode detect and config.
*
fallback = true;
#else
fallback = false;
- if (!priv->config.devx || !priv->config.dv_flow_en ||
+ if (!sh->devx || !priv->config.dv_flow_en ||
!priv->config.hca_attr.flow_counters_dump ||
!(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
(mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
}
/**
- * Spawn an Ethernet device from Verbs information.
+ * Spawn an Ethernet device from DevX information.
*
* @param dpdk_dev
* Backing DPDK device.
char name[RTE_ETH_NAME_MAX_LEN];
int own_domain_id = 0;
uint16_t port_id;
+ int i;
/* Build device name. */
strlcpy(name, dpdk_dev->name, sizeof(name));
strerror(rte_errno));
goto error;
}
- mlx5_malloc_mem_select(config->sys_mem_en);
sh = mlx5_alloc_shared_dev_ctx(spawn, config);
if (!sh)
return NULL;
- config->devx = sh->devx;
/* Initialize the shutdown event in mlx5_dev_spawn to
* support mlx5_is_removed for Windows.
*/
- err = mlx5_glue->devx_init_showdown_event(sh->ctx);
+ err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
if (err) {
DRV_LOG(ERR, "failed to init showdown event: %s",
strerror(errno));
goto error;
}
DRV_LOG(DEBUG, "MPW isn't supported");
- mlx5_os_get_dev_attr(sh->ctx, &device_attr);
- config->swp = 0;
+ mlx5_os_get_dev_attr(sh->cdev, &device_attr);
+ config->swp = device_attr.sw_parsing_offloads &
+ (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
+ MLX5_SW_PARSING_TSO_CAP);
config->ind_table_max_size =
sh->device_attr.max_rwq_indirection_table_size;
- if (RTE_CACHE_LINE_SIZE == 128 &&
- !(device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
- cqe_comp = 0;
- else
- cqe_comp = 1;
+ cqe_comp = 0;
config->cqe_comp = cqe_comp;
- DRV_LOG(DEBUG, "tunnel offloading is not supported");
- config->tunnel_en = 0;
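+ /* Advertise only the tunnel offloads the PMD handles: VXLAN, GRE, GENEVE. */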
+ config->tunnel_en = device_attr.tunnel_offloads_caps &
+ (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP |
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
+ if (config->tunnel_en) {
+ DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
+ config->tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
+ config->tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
+ config->tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
+ );
+ } else {
+ DRV_LOG(DEBUG, "tunnel offloading is not supported");
+ }
DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported");
config->mpls_en = 0;
/* Allocate private eth device data. */
* Look for sibling devices in order to reuse their switch domain
* if any, otherwise allocate one.
*/
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
const struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
}
/* Override some values set by hardware configuration. */
mlx5_args(config, dpdk_dev->devargs);
- err = mlx5_dev_check_sibling_config(priv, config);
+ err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
if (err)
goto error;
- config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
- DRV_LOG(DEBUG, "checksum offloading is %ssupported",
- (config->hw_csum ? "" : "not "));
DRV_LOG(DEBUG, "counters are not supported");
config->ind_table_max_size =
sh->device_attr.max_rwq_indirection_table_size;
* Remove this check once DPDK supports larger/variable
* indirection tables.
*/
- if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
- config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+ config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
config->ind_table_max_size);
- config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
- IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
- DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
- (config->hw_vlan_strip ? "" : "not "));
- config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
- IBV_RAW_PACKET_CAP_SCATTER_FCS);
if (config->hw_padding) {
DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
config->hw_padding = 0;
}
- config->tso = (sh->device_attr.max_tso > 0 &&
- (sh->device_attr.tso_supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
+ config->tso = (sh->device_attr.max_tso > 0);
if (config->tso)
config->tso_max_payload_sz = sh->device_attr.max_tso;
DRV_LOG(DEBUG, "%sMPS is %s.",
DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
config->cqe_comp = 0;
}
- if (config->devx) {
- err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
- if (err) {
- err = -err;
- goto error;
- }
- /* Check relax ordering support. */
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- }
- }
- if (config->devx) {
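+ /*
+ * With DevX, offload capabilities come straight from the HCA
+ * attributes cached on the common device.
+ */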
+ if (sh->devx) {
+ config->hca_attr = sh->cdev->config.hca_attr;
+ config->hw_csum = config->hca_attr.csum_cap;
+ DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+ (config->hw_csum ? "" : "not "));
+ config->hw_vlan_strip = config->hca_attr.vlan_cap;
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+ (config->hw_vlan_strip ? "" : "not "));
+ config->hw_fcs_strip = config->hca_attr.scatter_fcs;
+ }
+ if (sh->devx) {
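+ /*
+ * Probe the MTUTC register, when user register access is allowed,
+ * to learn whether the device clock runs in real-time mode.
+ */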
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
err = config->hca_attr.access_register_user ?
mlx5_devx_cmd_register_read
- (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+ (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
if (!err) {
uint32_t ts_mode;
(NS_PER_S / MS_PER_S))
config->rt_timestamp = 1;
}
- sh->rq_ts_format = config->hca_attr.rq_ts_format;
- sh->sq_ts_format = config->hca_attr.sq_ts_format;
- sh->qp_ts_format = config->hca_attr.qp_ts_format;
}
if (config->mprq.enabled) {
DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
if (priv->representor) {
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = priv->representor_id;
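+ /*
+ * A representor is backed by the first master port sharing its
+ * switch domain and device context; when none is found, the port
+ * backs itself.
+ */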
+ MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
+ struct mlx5_priv *opriv =
+ rte_eth_devices[port_id].data->dev_private;
+ if (opriv &&
+ opriv->master &&
+ opriv->domain_id == priv->domain_id &&
+ opriv->sh == priv->sh) {
+ eth_dev->data->backer_port_id = port_id;
+ break;
+ }
+ }
+ if (port_id >= RTE_MAX_ETHPORTS)
+ eth_dev->data->backer_port_id = eth_dev->data->port_id;
}
/*
* Store associated network device interface index. This index
goto error;
}
DRV_LOG(INFO,
- "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- eth_dev->data->port_id,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
+ "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
+ eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
#ifdef RTE_LIBRTE_MLX5_DEBUG
{
char ifname[MLX5_NAMESIZE];
eth_dev->rx_queue_count = mlx5_rx_queue_count;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
- priv->flows = 0;
priv->ctrl_flows = 0;
TAILQ_INIT(&priv->flow_meters);
- TAILQ_INIT(&priv->flow_meter_profiles);
+ priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
+ if (!priv->mtr_profile_tbl)
+ goto error;
/* Bring Ethernet device up. */
DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
eth_dev->data->port_id);
mlx5_set_min_inline(spawn, config);
/* Store device configuration on private structure. */
priv->config = *config;
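+ /*
+ * Create one rte_flow index pool per flow type. Reclaim mode
+ * releases trunk memory on free, so the per-core cache is disabled
+ * in that mode.
+ */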
+ for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+ icfg[i].release_mem_en = !!config->reclaim_mode;
+ if (config->reclaim_mode)
+ icfg[i].per_core_cache = 0;
+ priv->flows[i] = mlx5_ipool_create(&icfg[i]);
+ if (!priv->flows[i])
+ goto error;
+ }
/* Create context for virtual machine VLAN workaround. */
priv->vmwa_context = NULL;
if (config->dv_flow_en) {
err = ENOTSUP;
goto error;
}
- mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
- mlx5_hrxq_create_cb,
- mlx5_hrxq_match_cb,
- mlx5_hrxq_remove_cb);
+ priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+ mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
+ mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
+ mlx5_hrxq_clone_free_cb);
/* Query availability of metadata reg_c's. */
err = mlx5_flow_discover_mreg_c(eth_dev);
if (err < 0) {
goto error;
}
}
- if (config->devx && config->dv_flow_en) {
+ if (sh->devx && config->dv_flow_en) {
priv->obj_ops = devx_obj_ops;
} else {
DRV_LOG(ERR, "Flow mode %u is not supported "
return eth_dev;
error:
if (priv) {
+ if (priv->mtr_profile_tbl)
+ mlx5_l3t_destroy(priv->mtr_profile_tbl);
if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
mlx5_free(priv);
return -ENOTSUP;
}
-/**
- * Detect if a devx_device_bdf object has identical DBDF values to the
- * rte_pci_addr found in bus/pci probing
- *
- * @param[in] devx_bdf
- * Pointer to the devx_device_bdf structure.
- * @param[in] addr
- * Pointer to the rte_pci_addr structure.
- *
- * @return
- * 1 on Device match, 0 on mismatch.
- */
-static int
-mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf,
- struct rte_pci_addr *addr)
-{
- if (addr->domain != (devx_bdf->bus_id >> 8) ||
- addr->bus != (devx_bdf->bus_id & 0xff) ||
- addr->devid != devx_bdf->dev_id ||
- addr->function != devx_bdf->fnc_id) {
- return 0;
- }
- return 1;
-}
-
-/**
- * Detect if a devx_device_bdf object matches the rte_pci_addr
- * found in bus/pci probing
- * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF.
- *
- * @param[in] devx_bdf
- * Pointer to the devx_device_bdf structure.
- * @param[in] addr
- * Pointer to the rte_pci_addr structure.
- *
- * @return
- * 1 on Device match, 0 on mismatch, rte_errno code on failure.
- */
-static int
-mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
- struct rte_pci_addr *addr)
-{
- int err;
- struct devx_device mlx5_dev;
-
- if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr))
- return 1;
- /**
- * Didn't match on Native/PF BDF, could still
- * Match a VF BDF, check it next
- */
- err = mlx5_glue->query_device(devx_bdf, &mlx5_dev);
- if (err) {
- DRV_LOG(ERR, "query_device failed");
- rte_errno = err;
- return rte_errno;
- }
- if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr))
- return 1;
- return 0;
-}
-
/**
* DPDK callback to register a PCI device.
*
- * This function spawns Ethernet devices out of a given PCI device.
+ * This function spawns Ethernet devices out of a given common mlx5 device.
*
- * @param[in] pci_drv
- * PCI driver structure (mlx5_driver).
- * @param[in] pci_dev
- * PCI device information.
+ * @param[in] cdev
+ * Pointer to the common mlx5 device.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
+mlx5_os_net_probe(struct mlx5_common_device *cdev)
{
- struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs;
- /*
- * Number of found IB Devices matching with requested PCI BDF.
- * nd != 1 means there are multiple IB devices over the same
- * PCI device and we have representors and master.
- */
- unsigned int nd = 0;
- /*
- * Number of found IB device Ports. nd = 1 and np = 1..n means
- * we have the single multiport IB device, and there may be
- * representors attached to some of found ports.
- * Currently not supported.
- * unsigned int np = 0;
- */
-
- /*
- * Number of DPDK ethernet devices to Spawn - either over
- * multiple IB devices or multiple ports of single IB device.
- * Actually this is the number of iterations to spawn.
- */
- unsigned int ns = 0;
- /*
- * Bonding device
- * < 0 - no bonding device (single one)
- * >= 0 - bonding device (value is slave PF index)
- */
- int bd = -1;
- struct mlx5_dev_spawn_data *list = NULL;
- struct mlx5_dev_config dev_config;
- unsigned int dev_config_vf;
- int ret, err;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
+ struct mlx5_dev_spawn_data spawn = {
+ .pf_bond = -1,
+ .max_port = 1,
+ .phys_port = 1,
+ .phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
+ .pci_dev = pci_dev,
+ .cdev = cdev,
+ .ifindex = -1, /* Spawn will assign */
+ .info = (struct mlx5_switch_info){
+ .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
+ },
+ };
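+ /*
+ * MLX5_ARG_UNSET (-1) marks parameters the user did not set via
+ * devargs, so suitable defaults can be chosen later (see
+ * mlx5_set_min_inline()).
+ */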
+ struct mlx5_dev_config dev_config = {
+ .rx_vec_en = 1,
+ .txq_inline_max = MLX5_ARG_UNSET,
+ .txq_inline_min = MLX5_ARG_UNSET,
+ .txq_inline_mpw = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .mprq = {
+ .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+ .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+ },
+ .dv_flow_en = 1,
+ .log_hp_size = MLX5_ARG_UNSET,
+ };
+ int ret;
uint32_t restore;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
strerror(rte_errno));
return -rte_errno;
}
- errno = 0;
- devx_bdf_devs = mlx5_glue->get_device_list(&ret);
- orig_devx_bdf_devs = devx_bdf_devs;
- if (!devx_bdf_devs) {
- rte_errno = errno ? errno : ENOSYS;
- DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
- return -rte_errno;
- }
- /*
- * First scan the list of all Infiniband devices to find
- * matching ones, gathering into the list.
- */
- struct devx_device_bdf *devx_bdf_match[ret + 1];
-
- while (ret-- > 0) {
- err = mlx5_match_devx_devices_to_addr(devx_bdf_devs,
- &pci_dev->addr);
- if (!err) {
- devx_bdf_devs++;
- continue;
- }
- if (err != 1) {
- ret = -err;
- goto exit;
- }
- devx_bdf_match[nd++] = devx_bdf_devs;
- }
- devx_bdf_match[nd] = NULL;
- if (!nd) {
- /* No device matches, just complain and bail out. */
- DRV_LOG(WARNING,
- "no DevX device matches PCI device " PCI_PRI_FMT ","
- " is DevX Configured?",
- pci_dev->addr.domain, pci_dev->addr.bus,
- pci_dev->addr.devid, pci_dev->addr.function);
- rte_errno = ENOENT;
- ret = -rte_errno;
- goto exit;
- }
- /*
- * Now we can determine the maximal
- * amount of devices to be spawned.
- */
- list = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_dev_spawn_data),
- RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
- if (!list) {
- DRV_LOG(ERR, "spawn data array allocation failure");
- rte_errno = ENOMEM;
- ret = -rte_errno;
- goto exit;
- }
- memset(&list[ns].info, 0, sizeof(list[ns].info));
- list[ns].max_port = 1;
- list[ns].phys_port = 1;
- list[ns].phys_dev = devx_bdf_match[ns];
- list[ns].eth_dev = NULL;
- list[ns].pci_dev = pci_dev;
- list[ns].pf_bond = bd;
- list[ns].ifindex = -1; /* Spawn will assign */
- list[ns].info =
- (struct mlx5_switch_info){
- .master = 0,
- .representor = 0,
- .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
- .port_name = 0,
- .switch_id = 0,
- };
/* Device specific configuration. */
switch (pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
- dev_config_vf = 1;
+ dev_config.vf = 1;
break;
default:
- dev_config_vf = 0;
+ dev_config.vf = 0;
break;
}
- /* Default configuration. */
- memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
- dev_config.vf = dev_config_vf;
- dev_config.mps = 0;
- dev_config.dbnc = MLX5_ARG_UNSET;
- dev_config.rx_vec_en = 1;
- dev_config.txq_inline_max = MLX5_ARG_UNSET;
- dev_config.txq_inline_min = MLX5_ARG_UNSET;
- dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
- dev_config.txqs_inline = MLX5_ARG_UNSET;
- dev_config.vf_nl_en = 0;
- dev_config.mr_ext_memseg_en = 1;
- dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
- dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
- dev_config.dv_esw_en = 0;
- dev_config.dv_flow_en = 1;
- dev_config.decap_en = 0;
- dev_config.log_hp_size = MLX5_ARG_UNSET;
- list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device,
- &list[ns],
- &dev_config);
- if (!list[ns].eth_dev)
- goto exit;
- restore = list[ns].eth_dev->data->dev_flags;
- rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev);
+ spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
+ if (!spawn.eth_dev)
+ return -rte_errno;
+ restore = spawn.eth_dev->data->dev_flags;
+ rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
/* Restore non-PCI flags cleared by the above call. */
- list[ns].eth_dev->data->dev_flags |= restore;
- rte_eth_dev_probing_finish(list[ns].eth_dev);
- ret = 0;
-exit:
- /*
- * Do the routine cleanup:
- * - free allocated spawn data array
- * - free the device list
- */
- if (list)
- mlx5_free(list);
- MLX5_ASSERT(orig_devx_bdf_devs);
- mlx5_glue->free_device_list(orig_devx_bdf_devs);
- return ret;
+ spawn.eth_dev->data->dev_flags |= restore;
+ rte_eth_dev_probing_finish(spawn.eth_dev);
+ return 0;
}
/**
- * Set the reg_mr and dereg_mr call backs
- *
- * @param reg_mr_cb[out]
- * Pointer to reg_mr func
- * @param dereg_mr_cb[out]
- * Pointer to dereg_mr func
- *
+ * Clean up resources when the last device is closed.
*/
void
-mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb)
+mlx5_os_net_cleanup(void)
{
- *reg_mr_cb = mlx5_os_reg_mr;
- *dereg_mr_cb = mlx5_os_dereg_mr;
-}
-
-/**
- * Extract pdn of PD object using DevX
- *
- * @param[in] pd
- * Pointer to the DevX PD object.
- * @param[out] pdn
- * Pointer to the PD object number variable.
- *
- * @return
- * 0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
- if (!pd)
- return -EINVAL;
-
- *pdn = ((struct mlx5_pd *)pd)->pdn;
- return 0;
}
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};