/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter (nonzero to enable, zero to disable).
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
		dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
	MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
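	/* Look up whether this VLAN ID is already in the filter list. */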
	for (i = 0; (i != priv->vlan_filter_n); ++i)
		if (priv->vlan_filter[i] == vlan_id)
			break;
	/* Check if there's room for another VLAN filter. */
	if (i == RTE_DIM(priv->vlan_filter)) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (i < priv->vlan_filter_n) {
		MLX5_ASSERT(priv->vlan_filter_n != 0);
		/* Enabling an existing VLAN filter has no effect. */
		if (on)
			goto out;
		/* Remove VLAN filter from list. */
		--priv->vlan_filter_n;
		memmove(&priv->vlan_filter[i],
			&priv->vlan_filter[i + 1],
			sizeof(priv->vlan_filter[i]) *
			(priv->vlan_filter_n - i));
		priv->vlan_filter[priv->vlan_filter_n] = 0;
	} else {
		MLX5_ASSERT(i == priv->vlan_filter_n);
		/* Disabling an unknown VLAN filter has no effect. */
		if (!on)
			goto out;
		/* Add new VLAN filter. */
		priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
		++priv->vlan_filter_n;
	}
out:
	if (dev->data->dev_started)
		return mlx5_traffic_restart(dev);
	return 0;
}
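
/*
 * Note: applications normally reach this callback through
 * rte_eth_dev_vlan_filter(); the call below is only an illustrative sketch
 * with hypothetical port and VLAN values.
 *
 *   if (rte_eth_dev_vlan_filter(port_id, 100, 1) < 0)
 *           handle the error, rte_errno is set above;
 *
 * On a started port the updated filter list is programmed into hardware by
 * mlx5_traffic_restart(); on a stopped port it is applied at the next start.
 */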

/**
 * Callback to set/reset VLAN stripping for a specific queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue
 *   RX queue index.
 * @param on
 *   Enable/disable VLAN stripping.
 */
void
mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int ret = 0;

	/* Validate hw support */
	if (!priv->config.hw_vlan_strip) {
		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
			dev->data->port_id);
		return;
	}
	/* Validate queue number */
	if (queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
			dev->data->port_id, queue);
		return;
	}
	DRV_LOG(DEBUG,
		"port %u set VLAN stripping offloads %d for port %u queue %d",
		dev->data->port_id, on, rxq->port_id, queue);
	if (!rxq_ctrl->obj) {
		/* Update related bits in RX queue. */
		rxq->vlan_strip = !!on;
		return;
	}
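	/*
	 * Queue object already exists: apply the change through the
	 * Verbs/DevX queue object callbacks.
	 */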
	ret = priv->obj_ops->rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to modify object %d stripping "
			"mode: %s", dev->data->port_id,
			rxq_ctrl->obj->type, strerror(rte_errno));
		return;
	}
	/* Update related bits in RX queue. */
	rxq->vlan_strip = !!on;
}
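
/*
 * Note: this per-queue callback is normally reached through
 * rte_eth_dev_set_vlan_strip_on_queue() (illustrative sketch, hypothetical
 * values):
 *
 *   rte_eth_dev_set_vlan_strip_on_queue(port_id, 0, 1);
 *
 * It is also used by mlx5_vlan_offload_set() below to apply a port-wide
 * stripping setting to every Rx queue.
 */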

/**
 * Callback to set/reset VLAN offloads for a port.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mask
 *   VLAN offload bit mask.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_STRIP_MASK) {
		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
				       DEV_RX_OFFLOAD_VLAN_STRIP);

		if (!priv->config.hw_vlan_strip) {
			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
				dev->data->port_id);
			return 0;
		}
		/* Run on every RX queue and set/reset VLAN stripping. */
		for (i = 0; (i != priv->rxqs_n); i++)
			mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
	}
	return 0;
}
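
/*
 * Note: this callback is normally reached through
 * rte_eth_dev_set_vlan_offload(), which updates
 * dev->data->dev_conf.rxmode.offloads and then passes a mask of the offloads
 * that changed. Illustrative sketch with a hypothetical port:
 *
 *   int offloads = rte_eth_dev_get_vlan_offload(port_id);
 *
 *   rte_eth_dev_set_vlan_offload(port_id, offloads | ETH_VLAN_STRIP_OFFLOAD);
 */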

/**
 * Release VLAN network device, created for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
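	/*
	 * Drop the per-tag reference; the VLAN netdev itself is removed only
	 * when the last reference is gone.
	 */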
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
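	/*
	 * Create the VLAN netdev on top of the VF interface on the first use
	 * of this tag; later users of the same tag only take a reference.
	 */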
	if (vlan_dev[vlan->tag].refcnt == 0) {
		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex =
			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
						 vlan->tag);
	}
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
}
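
/*
 * Note: mlx5_vlan_vmwa_acquire() and mlx5_vlan_vmwa_release() are expected to
 * be used as a pair by the flow engine around VLAN push actions on a VF, so
 * that the kernel VLAN netdev for a tag exists as long as any flow still
 * references that tag. A failed acquire leaves vlan->created at 0.
 */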

/*
 * Create per-ethernet-device VLAN VM workaround context.
 */
struct mlx5_nl_vlan_vmwa_context *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage workaround over PF. */
	if (!config->vf)
		return NULL;
	/* Check whether there is a desired virtual environment. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
			   SOCKET_ID_ANY);
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Cannot allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Cannot create Netlink socket"
			" for VLAN workaround context");
		mlx5_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}
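
/*
 * Note: mlx5_vlan_vmwa_init() is expected to be called once per device during
 * probing, with the VF interface index, and mlx5_vlan_vmwa_exit() on close;
 * the actual call sites are outside this file.
 */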

/*
 * Destroy per-ethernet-device VLAN VM workaround context.
 */
void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
{
	unsigned int i;

	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	mlx5_free(vmwa);
}