diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 2993f7e..4fe7e34 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1127,28 +1127,8 @@ exit:
 }
 
 /**
- * Unregister the mempool from the protection domain.
- *
- * @param sh
- *   Pointer to the device shared context.
- * @param mp
- *   Mempool being unregistered.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
-                                      struct rte_mempool *mp)
-{
-       struct mlx5_mp_id mp_id;
-
-       mlx5_mp_id_init(&mp_id, 0);
-       if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)
-               DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
-                       mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to register mempools
- * for the protection domain.
+ * rte_mempool_walk() callback to unregister Rx mempools.
+ * It is used when implicit mempool registration is disabled.
  *
  * @param mp
  *   The mempool being walked.
@@ -1156,66 +1136,11 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh,
  *   Pointer to the device shared context.
  */
 static void
-mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg)
+mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
 {
        struct mlx5_dev_ctx_shared *sh = arg;
-       struct mlx5_mp_id mp_id;
-       int ret;
-
-       mlx5_mp_id_init(&mp_id, 0);
-       ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,
-                                      &mp_id);
-       if (ret < 0 && rte_errno != EEXIST)
-               DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
-                       mp->name, sh->cdev->pd, rte_strerror(rte_errno));
-}
-
-/**
- * rte_mempool_walk() callback to unregister mempools
- * from the protection domain.
- *
- * @param mp
- *   The mempool being walked.
- * @param arg
- *   Pointer to the device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
-{
-       mlx5_dev_ctx_shared_mempool_unregister
-                               ((struct mlx5_dev_ctx_shared *)arg, mp);
-}
 
-/**
- * Mempool life cycle callback for Ethernet devices.
- *
- * @param event
- *   Mempool life cycle event.
- * @param mp
- *   Associated mempool.
- * @param arg
- *   Pointer to a device shared context.
- */
-static void
-mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event,
-                                    struct rte_mempool *mp, void *arg)
-{
-       struct mlx5_dev_ctx_shared *sh = arg;
-       struct mlx5_mp_id mp_id;
-
-       switch (event) {
-       case RTE_MEMPOOL_EVENT_READY:
-               mlx5_mp_id_init(&mp_id, 0);
-               if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,
-                                            mp, &mp_id) < 0)
-                       DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
-                               mp->name, sh->cdev->pd,
-                               rte_strerror(rte_errno));
-               break;
-       case RTE_MEMPOOL_EVENT_DESTROY:
-               mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
-               break;
-       }
+       mlx5_dev_mempool_unregister(sh->cdev, mp);
 }
 
 /**
@@ -1236,7 +1161,7 @@ mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
        struct mlx5_dev_ctx_shared *sh = arg;
 
        if (event == RTE_MEMPOOL_EVENT_DESTROY)
-               mlx5_dev_ctx_shared_mempool_unregister(sh, mp);
+               mlx5_dev_mempool_unregister(sh->cdev, mp);
 }
 
 int
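
Note: the Rx-only subscription above relies on the mempool event API that DPDK 21.11 added for drivers (internal to PMDs). A minimal sketch of the register/unregister pairing with a hypothetical callback and context, not taken from this file; the EEXIST/ENOENT handling mirrors what the driver code here does:

#include <stdio.h>
#include <rte_errno.h>
#include <rte_mempool.h>

/* Hypothetical driver context passed through user_data. */
struct example_ctx { int dummy; };

static void
example_mempool_event_cb(enum rte_mempool_event event,
			 struct rte_mempool *mp, void *user_data)
{
	struct example_ctx *ctx = user_data;

	(void)ctx;
	if (event == RTE_MEMPOOL_EVENT_DESTROY)
		printf("mempool %s destroyed\n", mp->name);
}

static int
example_subscribe(struct example_ctx *ctx)
{
	/* EEXIST means this (callback, user_data) pair is already known. */
	int ret = rte_mempool_event_callback_register
				(example_mempool_event_cb, ctx);

	return (ret == 0 || rte_errno == EEXIST) ? 0 : ret;
}

static void
example_unsubscribe(struct example_ctx *ctx)
{
	/* ENOENT means the pair was never registered. */
	(void)rte_mempool_event_callback_unregister
				(example_mempool_event_cb, ctx);
}
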
@@ -1252,14 +1177,68 @@ mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
                                (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
                return ret == 0 || rte_errno == EEXIST ? 0 : ret;
        }
-       /* Callback for this shared context may be already registered. */
-       ret = rte_mempool_event_callback_register
-                               (mlx5_dev_ctx_shared_mempool_event_cb, sh);
-       if (ret != 0 && rte_errno != EEXIST)
-               return ret;
-       /* Register mempools only once for this shared context. */
-       if (ret == 0)
-               rte_mempool_walk(mlx5_dev_ctx_shared_mempool_register_cb, sh);
+       return mlx5_dev_mempool_subscribe(sh->cdev);
+}
+
+/**
+ * Set up multiple TISs with different affinities according to
+ * the number of bonding ports.
+ *
+ * @param sh
+ *   Pointer to the device shared context.
+ *
+ * @return
+ *   Zero on success, -1 otherwise.
+ */
+static int
+mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
+{
+       int i;
+       struct mlx5_devx_lag_context lag_ctx = { 0 };
+       struct mlx5_devx_tis_attr tis_attr = { 0 };
+
+       tis_attr.transport_domain = sh->td->id;
+       if (sh->bond.n_port) {
+               if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
+                       sh->lag.tx_remap_affinity[0] =
+                               lag_ctx.tx_remap_affinity_1;
+                       sh->lag.tx_remap_affinity[1] =
+                               lag_ctx.tx_remap_affinity_2;
+                       sh->lag.affinity_mode = lag_ctx.port_select_mode;
+               } else {
+                       DRV_LOG(ERR, "Failed to query LAG affinity.");
+                       return -1;
+               }
+               if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
+                       for (i = 0; i < sh->bond.n_port; i++) {
+                               tis_attr.lag_tx_port_affinity =
+                                       MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
+                                                       sh->bond.n_port);
+                               sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
+                                               &tis_attr);
+                               if (!sh->tis[i]) {
+                                       DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
+                                               " %s.", i, sh->bond.n_port,
+                                               sh->ibdev_name);
+                                       return -1;
+                               }
+                       }
+                       DRV_LOG(DEBUG, "LAG number of ports: %d, affinity_1 & 2: pf%d & %d.",
+                               sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
+                               lag_ctx.tx_remap_affinity_2);
+                       return 0;
+               }
+               if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
+                       DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
+                                       sh->ibdev_name);
+       }
+       tis_attr.lag_tx_port_affinity = 0;
+       sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
+       if (!sh->tis[0]) {
+               DRV_LOG(ERR, "Failed to create TIS 0 for device %s.",
+                       sh->ibdev_name);
+               return -1;
+       }
        return 0;
 }
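
For illustration, the per-TIS affinity values set above are 1-based port indexes handed out round-robin across the bonded ports, while affinity 0 leaves the port choice to firmware. A standalone sketch of that mapping follows; the modulo form is an assumption about what MLX5_IFC_LAG_MAP_TIS_AFFINITY computes, not a copy of the mlx5_prm.h definition:

#include <stdio.h>

/* Assumed equivalent of MLX5_IFC_LAG_MAP_TIS_AFFINITY(index, num):
 * 0 when there is no bonding, otherwise a 1-based port index
 * distributed round-robin over the bonded ports.
 */
static unsigned int
example_tis_affinity(unsigned int tis_idx, unsigned int n_port)
{
	return n_port ? (tis_idx % n_port) + 1 : 0;
}

int
main(void)
{
	const unsigned int n_port = 2; /* two bonded PFs */
	unsigned int i;

	for (i = 0; i < n_port; i++)
		printf("TIS %u -> port affinity %u\n",
		       i, example_tis_affinity(i, n_port));
	return 0;
}

For a 2-port bond this prints affinities 1 and 2, matching the tx_remap_affinity_1/tx_remap_affinity_2 values queried from the LAG context above.
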
 
@@ -1290,7 +1269,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        struct mlx5_dev_ctx_shared *sh;
        int err = 0;
        uint32_t i;
-       struct mlx5_devx_tis_attr tis_attr = { 0 };
 
        MLX5_ASSERT(spawn);
        /* Secondary process should not create the shared context. */
@@ -1349,9 +1327,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                        err = ENOMEM;
                        goto error;
                }
-               tis_attr.transport_domain = sh->td->id;
-               sh->tis = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
-               if (!sh->tis) {
+               if (mlx5_setup_tis(sh)) {
                        DRV_LOG(ERR, "TIS allocation failure");
                        err = ENOMEM;
                        goto error;
@@ -1390,10 +1366,13 @@ error:
        pthread_mutex_destroy(&sh->txpp.mutex);
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        MLX5_ASSERT(sh);
-       if (sh->tis)
-               claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
+       i = 0;
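+       /* A do-while: TIS 0 must be released even when bond.n_port is 0. */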
+       do {
+               if (sh->tis[i])
+                       claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+       } while (++i < (uint32_t)sh->bond.n_port);
        if (sh->devx_rx_uar)
                mlx5_glue->devx_free_uar(sh->devx_rx_uar);
        if (sh->tx_uar)
@@ -1415,6 +1394,7 @@ void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 {
        int ret;
+       int i = 0;
 
        pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
@@ -1437,19 +1417,20 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
        if (--sh->refcnt)
                goto exit;
        /* Stop watching for mempool events and unregister all mempools. */
-       ret = rte_mempool_event_callback_unregister
-                               (mlx5_dev_ctx_shared_mempool_event_cb, sh);
-       if (ret < 0 && rte_errno == ENOENT)
+       if (!sh->cdev->config.mr_mempool_reg_en) {
                ret = rte_mempool_event_callback_unregister
                                (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
-       if (ret == 0)
-               rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
-                                sh);
+               if (ret == 0)
+                       rte_mempool_walk
+                            (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
+       }
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
-       /* Release flow workspaces objects on the last device. */
-       if (LIST_EMPTY(&mlx5_dev_ctx_list))
+       /* Release resources on the last device removal. */
+       if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
+               mlx5_os_net_cleanup();
                mlx5_flow_os_release_workspace();
+       }
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        /*
         *  Ensure there is no async event handler installed.
@@ -1468,8 +1449,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_glue->devx_free_uar(sh->tx_uar);
                sh->tx_uar = NULL;
        }
-       if (sh->tis)
-               claim_zero(mlx5_devx_cmd_destroy(sh->tis));
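+       /* TIS 0 always exists; the do-while covers the n_port == 0 case. */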
+       do {
+               if (sh->tis[i])
+                       claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
+       } while (++i < sh->bond.n_port);
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->devx_rx_uar)
@@ -1593,10 +1576,10 @@ mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
 {
        MLX5_ASSERT(udp_tunnel != NULL);
-       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
+       if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
-       if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
+       if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
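
For context, this callback backs the generic ethdev tunnel-port API; a minimal caller sketch (the port_id value is a placeholder) showing the only requests this driver accepts, the IANA default ports:

#include <rte_ethdev.h>

static int
example_add_vxlan_port(uint16_t port_id) /* port_id: placeholder */
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789, /* IANA default; 4790 for VXLAN-GPE */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	/* mlx5 returns -ENOTSUP for any non-default port number. */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
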