X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.c;h=8cbfee1bac76da7ec35220eae817becf1db5484f;hb=1b4ce87dc5e6;hp=30d4e70a77a1b7069846b3fe4b540b491e65ab21;hpb=f9de87187b7f233cc5b1ea964c05311dfeed951a;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 30d4e70a77..8cbfee1bac 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -46,6 +46,7 @@
 #include "mlx5_defs.h"
 #include "mlx5_glue.h"
 #include "mlx5_mr.h"
+#include "mlx5_flow.h"
 
 /* Device parameter to enable RX completion queue compression. */
 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
@@ -89,6 +90,9 @@
 /* Allow L3 VXLAN flow creation. */
 #define MLX5_L3_VXLAN_EN "l3_vxlan_en"
 
+/* Activate DV flow steering. */
+#define MLX5_DV_FLOW_EN "dv_flow_en"
+
 /* Activate Netlink support in VF mode. */
 #define MLX5_VF_NL_EN "vf_nl_en"
 
@@ -283,7 +287,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	if (priv->nl_socket_rdma >= 0)
 		close(priv->nl_socket_rdma);
 	if (priv->mnl_socket)
-		mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+		mlx5_flow_tcf_socket_destroy(priv->mnl_socket);
 	ret = mlx5_hrxq_ibv_verify(dev);
 	if (ret)
 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -490,6 +494,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 		config->l3_vxlan_en = !!tmp;
 	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
 		config->vf_nl_en = !!tmp;
+	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
+		config->dv_flow_en = !!tmp;
 	} else {
 		DRV_LOG(WARNING, "%s: unknown parameter", key);
 		rte_errno = EINVAL;
@@ -527,6 +533,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 		MLX5_RX_VEC_EN,
 		MLX5_L3_VXLAN_EN,
 		MLX5_VF_NL_EN,
+		MLX5_DV_FLOW_EN,
 		MLX5_REPRESENTOR,
 		NULL,
 	};
@@ -568,11 +575,13 @@ static struct rte_pci_driver mlx5_driver;
 static void *uar_base;
 
 static int
-find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+find_lower_va_bound(const struct rte_memseg_list *msl,
 		const struct rte_memseg *ms, void *arg)
 {
 	void **addr = arg;
 
+	if (msl->external)
+		return 0;
 	if (*addr == NULL)
 		*addr = ms->addr;
 	else
@@ -1081,7 +1090,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	priv->dev_data = eth_dev->data;
 	eth_dev->data->mac_addrs = priv->mac;
 	eth_dev->device = dpdk_dev;
-	eth_dev->device->driver = &mlx5_driver.driver;
 	err = mlx5_uar_init_primary(eth_dev);
 	if (err) {
 		err = rte_errno;
@@ -1130,7 +1138,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
 	if (vf && config.vf_nl_en)
 		mlx5_nl_mac_addr_sync(eth_dev);
-	priv->mnl_socket = mlx5_nl_flow_socket_create();
+	priv->mnl_socket = mlx5_flow_tcf_socket_create();
 	if (!priv->mnl_socket) {
 		err = -rte_errno;
 		DRV_LOG(WARNING,
@@ -1146,7 +1154,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			error.message =
 				"cannot retrieve network interface index";
 		} else {
-			err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+			err = mlx5_flow_tcf_init(priv->mnl_socket, ifindex,
 						&error);
 		}
 		if (err) {
@@ -1154,7 +1162,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 				"flow rules relying on switch offloads will"
 				" not be supported: %s: %s",
 				error.message, strerror(rte_errno));
-			mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+			mlx5_flow_tcf_socket_destroy(priv->mnl_socket);
 			priv->mnl_socket = NULL;
 		}
 	}
@@ -1211,7 +1219,7 @@ error:
 		if (priv->nl_socket_rdma >= 0)
 			close(priv->nl_socket_rdma);
 		if (priv->mnl_socket)
-			mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+			mlx5_flow_tcf_socket_destroy(priv->mnl_socket);
 		if (own_domain_id)
 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
 		rte_free(priv);
@@ -1488,6 +1496,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
 	},
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
+	},
 	{
 		.vendor_id = 0
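Usage note (not part of the patch): the new "dv_flow_en" key is parsed by mlx5_args_check() like the other mlx5 run-time parameters, so it is supplied per device through the devargs string. The sketch below is a minimal illustration assuming the usual DPDK "<PCI address>,key=value" devargs convention; the helper name and PCI address are placeholders, rte_eal_init() is assumed to have run already, and applications more commonly pass the same string on the EAL command line instead.

	#include <rte_dev.h>

	/* Hypothetical helper: hot-plug an mlx5 port with DV flow steering
	 * enabled via the new "dv_flow_en" devarg.  "0000:03:00.0" is a
	 * placeholder PCI address; returns 0 on success, negative on error. */
	static int
	probe_mlx5_with_dv_flow(void)
	{
		return rte_dev_probe("0000:03:00.0,dv_flow_en=1");
	}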