net/mlx5: fix representors detection
[dpdk.git] / drivers / net / mlx5 / mlx5.c
index 1b393c3..a1c0ad7 100644 (file)
@@ -242,6 +242,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        /* In case mlx5_dev_stop() has not been called. */
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_traffic_disable(dev);
+       mlx5_flow_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
@@ -261,7 +262,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
-       mlx5_flow_delete_drop_queue(dev);
        mlx5_mprq_free_mp(dev);
        mlx5_mr_release(dev);
        if (priv->pd != NULL) {
@@ -282,6 +282,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
+       if (priv->mnl_socket)
+               mlx5_nl_flow_socket_destroy(priv->mnl_socket);
        ret = mlx5_hrxq_ibv_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -598,7 +600,7 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
        rte_memseg_walk(find_lower_va_bound, &addr);
 
        /* keep distance to hugepages to minimize potential conflicts. */
-       addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
+       addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
        /* anonymous mmap, no real memory consumption. */
        addr = mmap(addr, MLX5_UAR_SIZE,
                    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -718,7 +720,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        unsigned int tunnel_en = 0;
        unsigned int mpls_en = 0;
        unsigned int swp = 0;
-       unsigned int verb_priorities = 0;
        unsigned int mprq = 0;
        unsigned int mprq_min_stride_size_n = 0;
        unsigned int mprq_max_stride_size_n = 0;
@@ -940,6 +941,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        priv->device_attr = attr;
        priv->pd = pd;
        priv->mtu = ETHER_MTU;
+#ifndef RTE_ARCH_64
+       /* Initialize UAR access locks for 32-bit implementations. */
+       rte_spinlock_init(&priv->uar_lock_cq);
+       for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+               rte_spinlock_init(&priv->uar_lock[i]);
+#endif
        /* Some internal functions rely on Netlink sockets, open them now. */
        priv->nl_socket_rdma = mlx5_nl_init(0, NETLINK_RDMA);
        priv->nl_socket_route = mlx5_nl_init(RTMGRP_LINK, NETLINK_ROUTE);
@@ -1117,6 +1124,34 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
        if (vf && config.vf_nl_en)
                mlx5_nl_mac_addr_sync(eth_dev);
+       priv->mnl_socket = mlx5_nl_flow_socket_create();
+       if (!priv->mnl_socket) {
+               err = -rte_errno;
+               DRV_LOG(WARNING,
+                       "flow rules relying on switch offloads will not be"
+                       " supported: cannot open libmnl socket: %s",
+                       strerror(rte_errno));
+       } else {
+               struct rte_flow_error error;
+               unsigned int ifindex = mlx5_ifindex(eth_dev);
+
+               if (!ifindex) {
+                       err = -rte_errno;
+                       error.message =
+                               "cannot retrieve network interface index";
+               } else {
+                       err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+                                               &error);
+               }
+               if (err) {
+                       DRV_LOG(WARNING,
+                               "flow rules relying on switch offloads will"
+                               " not be supported: %s: %s",
+                               error.message, strerror(rte_errno));
+                       mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+                       priv->mnl_socket = NULL;
+               }
+       }
        TAILQ_INIT(&priv->flows);
        TAILQ_INIT(&priv->ctrl_flows);
        /* Hint libmlx5 to use PMD allocator for data plane resources */
@@ -1139,24 +1174,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        mlx5_link_update(eth_dev, 0);
        /* Store device configuration on private structure. */
        priv->config = config;
-       /* Create drop queue. */
-       err = mlx5_flow_create_drop_queue(eth_dev);
-       if (err) {
-               DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
-                       eth_dev->data->port_id, strerror(rte_errno));
-               err = rte_errno;
-               goto error;
-       }
        /* Supported Verbs flow priority number detection. */
-       if (verb_priorities == 0)
-               verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
-       if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
-               DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
-                       eth_dev->data->port_id, verb_priorities);
-               err = ENOTSUP;
+       err = mlx5_flow_discover_priorities(eth_dev);
+       if (err < 0)
                goto error;
-       }
-       priv->config.max_verbs_prio = verb_priorities;
+       priv->config.flow_prio = err;
        /*
         * Once the device is added to the list of memory event
         * callback, its global MR cache table cannot be expanded
@@ -1182,6 +1204,8 @@ error:
                        close(priv->nl_socket_route);
                if (priv->nl_socket_rdma >= 0)
                        close(priv->nl_socket_rdma);
+               if (priv->mnl_socket)
+                       mlx5_nl_flow_socket_destroy(priv->mnl_socket);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
                rte_free(priv);
@@ -1306,7 +1330,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
         * Netlink calls assuming kernel drivers are recent enough to
         * support them.
         *
-        * In the event of identification failure through Netlink, either:
+        * In the event of identification failure through Netlink, try again
+        * through sysfs, then either:
         *
         * 1. No device matches (n == 0), complain and bail out.
         * 2. A single IB device matches (n == 1) and is not a representor,
@@ -1325,7 +1350,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                if (nl_route < 0 ||
                    !list[i].ifindex ||
                    mlx5_nl_switch_info(nl_route, list[i].ifindex,
-                                       &list[i].info)) {
+                                       &list[i].info) ||
+                   ((!list[i].info.representor && !list[i].info.master) &&
+                    mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
                        list[i].ifindex = 0;
                        memset(&list[i].info, 0, sizeof(list[i].info));
                        continue;