net/mlx5: replace flow list with indexed pool
drivers/net/mlx5/linux/mlx5_vlan_os.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Not needed by this file; included to work around the lack of off_t
 * definition for mlx5dv.h with unpatched rdma-core versions.
 */
#include <sys/types.h>

#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>

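/*
 * VLAN VM workaround (VMWA) overview, summarizing the code below:
 *
 * When the device is a VF running under a matching hypervisor, VLAN
 * tagging is handled by creating a VLAN network interface over the VF
 * via Netlink (mlx5_nl_vlan_vmwa_create/delete). The per-device context
 * keeps one entry per VLAN tag in vlan_dev[]; entries are reference
 * counted, so the kernel interface for a tag is created on the first
 * acquire and deleted on the last release.
 */
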
/*
 * Release VLAN network device, created for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
	rte_spinlock_lock(&vmwa->sl);
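	/*
	 * Drop the per-tag reference; the kernel VLAN netdevice is deleted
	 * only when the last user of this tag releases it.
	 */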
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
	rte_spinlock_unlock(&vmwa->sl);
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
	rte_spinlock_lock(&vmwa->sl);
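	/* First user of this tag: create the VLAN netdevice over the VF. */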
	if (vlan_dev[vlan->tag].refcnt == 0) {
		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex =
			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
						 vlan->tag);
	}
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
	rte_spinlock_unlock(&vmwa->sl);
}
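
/*
 * Caller-side pattern, as an illustrative sketch only (the actual callers
 * live elsewhere in the PMD and embed struct mlx5_vf_vlan in larger
 * objects; only the .tag and .created fields are taken from this file):
 *
 *	struct mlx5_vf_vlan vf_vlan = { .tag = vlan_tag };
 *
 *	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
 *	if (vf_vlan.created) {
 *		... use the VLAN interface ...
 *		mlx5_vlan_vmwa_release(dev, &vf_vlan);
 *	}
 */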

/*
 * Create per ethernet device VLAN VM workaround context.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param ifindex
 *   Interface index.
 *
 * @return
 *   Pointer to mlx5_nl_vlan_vmwa_context on success, NULL otherwise.
 */
void *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage workaround over PF. */
	if (!config->vf)
		return NULL;
	/* Check whether there is a desired virtual environment. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
			   SOCKET_ID_ANY);
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Can not allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	rte_spinlock_init(&vmwa->sl);
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Can not create Netlink socket"
			" for VLAN workaround context");
		mlx5_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}
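
/*
 * Context lifetime, as an illustrative sketch (where exactly the PMD
 * invokes these hooks is an assumption; only priv->vmwa_context is taken
 * from this file):
 *
 *	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, vf_ifindex);
 *	...
 *	if (priv->vmwa_context)
 *		mlx5_vlan_vmwa_exit(priv->vmwa_context);
 */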

/*
 * Destroy per ethernet device VLAN VM workaround context.
 *
 * @param vmctx
 *   Pointer to the VLAN VM workaround context.
 */
void
mlx5_vlan_vmwa_exit(void *vmctx)
{
	unsigned int i;
	struct mlx5_nl_vlan_vmwa_context *vmwa = vmctx;

	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	mlx5_free(vmwa);
}