#include <rte_malloc.h>
#include <ethdev_driver.h>
-#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
-#include <mlx5_common_pci.h>
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"
+#define MLX5_ETH_DRIVER_NAME mlx5_eth
+
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
MLX5_REPRESENTOR_REPR(-1) == repr;
}
+/**
+ * Decide whether representor ID is an SF port representor.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * Non-zero if the representor ID is an SF port representor, otherwise 0.
+ */
+bool
+mlx5_is_sf_repr(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);
+
+ return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
+}
+
/**
* Initialize the ASO aging management structure.
*
priv->rxqs_n = 0;
priv->rxqs = NULL;
}
+ if (priv->representor) {
+ /* Each representor has a dedicated interrupt handler. */
+ mlx5_free(dev->intr_handle);
+ dev->intr_handle = NULL;
+ }
if (priv->txqs != NULL) {
/* XXX race condition if mlx5_tx_burst() is still running. */
rte_delay_us_sleep(1000);
.xstats_get_names = mlx5_xstats_get_names,
.fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
+ .representor_info_get = mlx5_representor_info_get,
.read_clock = mlx5_txpp_read_clock,
.rx_queue_start = mlx5_rx_queue_start,
.rx_queue_stop = mlx5_rx_queue_stop,
.xstats_get_names = mlx5_xstats_get_names,
.fw_version_get = mlx5_fw_version_get,
.dev_infos_get = mlx5_dev_infos_get,
+ .representor_info_get = mlx5_representor_info_get,
.read_clock = mlx5_txpp_read_clock,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
*/
int
mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
- struct mlx5_dev_config *config)
+ struct mlx5_dev_config *config,
+ struct rte_device *dpdk_dev)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_dev_config *sh_conf = NULL;
if (sh->refcnt == 1)
return 0;
/* Find the device with shared context. */
- MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+ MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
(dev->device == odev ||
(dev->device->driver &&
dev->device->driver->name &&
- !strcmp(dev->device->driver->name, MLX5_PCI_DRIVER_NAME))))
+ ((strcmp(dev->device->driver->name,
+ MLX5_PCI_DRIVER_NAME) == 0) ||
+ (strcmp(dev->device->driver->name,
+ MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
break;
port_id++;
}
}
/**
- * DPDK callback to remove a PCI device.
+ * Callback to remove a device.
*
- * This function removes all Ethernet devices belong to a given PCI device.
+ * This function removes all Ethernet devices belonging to a given device.
*
- * @param[in] pci_dev
- * Pointer to the PCI device.
+ * @param[in] dev
+ * Pointer to the generic device.
*
* @return
* 0 on success, the function cannot fail.
*/
static int
-mlx5_pci_remove(struct rte_pci_device *pci_dev)
+mlx5_net_remove(struct rte_device *dev)
{
uint16_t port_id;
int ret = 0;
- RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+ RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
/*
* mlx5_dev_close() is not registered to secondary process,
* call the close function explicitly for secondary process.
}
};
-static struct mlx5_pci_driver mlx5_driver = {
- .driver_class = MLX5_CLASS_ETH,
- .pci_driver = {
- .driver = {
- .name = MLX5_PCI_DRIVER_NAME,
- },
- .id_table = mlx5_pci_id_map,
- .probe = mlx5_os_pci_probe,
- .remove = mlx5_pci_remove,
- .dma_map = mlx5_dma_map,
- .dma_unmap = mlx5_dma_unmap,
- .drv_flags = PCI_DRV_FLAGS,
- },
+static struct mlx5_class_driver mlx5_net_driver = {
+ .drv_class = MLX5_CLASS_ETH,
+ .name = RTE_STR(MLX5_ETH_DRIVER_NAME),
+ .id_table = mlx5_pci_id_map,
+ .probe = mlx5_os_net_probe,
+ .remove = mlx5_net_remove,
+ .dma_map = mlx5_net_dma_map,
+ .dma_unmap = mlx5_net_dma_unmap,
+ .probe_again = 1,
+ .intr_lsc = 1,
+ .intr_rmv = 1,
};
/* Initialize driver log type. */
mlx5_set_cksum_table();
mlx5_set_swp_types_table();
if (mlx5_glue)
- mlx5_pci_driver_register(&mlx5_driver);
+ mlx5_class_driver_register(&mlx5_net_driver);
}
-RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
-RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
+RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");