/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Not needed by this file; included to work around the lack of off_t
 * definition for mlx5dv.h with unpatched rdma-core versions.
 */
#include <sys/types.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx5dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_nl.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
		dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
	MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
	for (i = 0; (i != priv->vlan_filter_n); ++i)
		if (priv->vlan_filter[i] == vlan_id)
			break;
	/* Check if there's room for another VLAN filter. */
	if (i == RTE_DIM(priv->vlan_filter)) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (i < priv->vlan_filter_n) {
		MLX5_ASSERT(priv->vlan_filter_n != 0);
		/* Enabling an existing VLAN filter has no effect. */
		if (on)
			goto out;
		/* Remove VLAN filter from list. */
		--priv->vlan_filter_n;
		memmove(&priv->vlan_filter[i],
			&priv->vlan_filter[i + 1],
			sizeof(priv->vlan_filter[i]) *
			(priv->vlan_filter_n - i));
		priv->vlan_filter[priv->vlan_filter_n] = 0;
	} else {
		MLX5_ASSERT(i == priv->vlan_filter_n);
		/* Disabling an unknown VLAN filter has no effect. */
		if (!on)
			goto out;
		/* Add new VLAN filter. */
		priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
		++priv->vlan_filter_n;
	}
out:
	if (dev->data->dev_started)
		return mlx5_traffic_restart(dev);
	return 0;
}
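
/*
 * Usage sketch (illustrative only): applications reach the callback above
 * through the generic ethdev API, e.g.
 *
 *	ret = rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
 *
 * where port_id and vlan_id are hypothetical application values and a
 * negative return value indicates failure.
 */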

/**
 * Callback to set/reset VLAN stripping for a specific queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue
 *   RX queue index.
 * @param on
 *   Enable/disable VLAN stripping.
 */
void
mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
	uint16_t vlan_offloads =
		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
		0;
	int ret = 0;

	/* Validate hw support. */
	if (!priv->config.hw_vlan_strip) {
		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
			dev->data->port_id);
		return;
	}
	/* Validate queue number. */
	if (queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
			dev->data->port_id, queue);
		return;
	}
	DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %u queue %d",
		dev->data->port_id, vlan_offloads, rxq->port_id, queue);
	if (!rxq_ctrl->obj) {
		/* Update related bits in RX queue. */
		rxq->vlan_strip = !!on;
		return;
	}
	if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		/* Verbs Rx queue object: toggle CVLAN stripping on the WQ. */
		mod = (struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_FLAGS,
			.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
			.flags = vlan_offloads,
		};
		ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
	} else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
		/* DevX Rx queue object: toggle VLAN Strip Disable (VSD). */
		struct mlx5_devx_modify_rq_attr rq_attr;

		memset(&rq_attr, 0, sizeof(rq_attr));
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		rq_attr.vsd = (on ? 0 : 1);
		rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
	}
	if (ret) {
		DRV_LOG(ERR, "port %u failed to modify object %d stripping "
			"mode: %s", dev->data->port_id,
			rxq_ctrl->obj->type, strerror(rte_errno));
		return;
	}
	/* Update related bits in RX queue. */
	rxq->vlan_strip = !!on;
}
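
/*
 * Usage sketch (illustrative only): per-queue stripping is normally
 * requested through the ethdev API, e.g.
 *
 *	rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue_id, 1);
 *
 * where port_id and rx_queue_id are hypothetical application values.
 */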

/**
 * Callback to set/reset VLAN offloads for a port.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mask
 *   VLAN offload bit mask.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_STRIP_MASK) {
		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
				       DEV_RX_OFFLOAD_VLAN_STRIP);

		if (!priv->config.hw_vlan_strip) {
			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
				dev->data->port_id);
			return 0;
		}
		/* Run on every RX queue and set/reset VLAN stripping. */
		for (i = 0; (i != priv->rxqs_n); i++)
			mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
	}
	return 0;
}
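
/*
 * Usage sketch (illustrative only): applications reach the port-wide
 * callback above through the ethdev API, e.g.
 *
 *	rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
 *
 * the ethdev layer records the requested Rx offloads and passes the
 * ETH_VLAN_STRIP_MASK bit down to this callback; port_id is a
 * hypothetical application value.
 */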

/**
 * Release VLAN network device, created for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
	if (vlan_dev[vlan->tag].refcnt == 0) {
		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex =
			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
						 vlan->tag);
	}
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
}
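
/*
 * Illustrative pairing (hypothetical caller): the acquire/release pair
 * above keeps a per-tag reference count, so a user of the workaround is
 * expected to do something like
 *
 *	struct mlx5_vf_vlan vf_vlan = { .tag = vlan_tag };
 *
 *	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
 *	...
 *	if (vf_vlan.created)
 *		mlx5_vlan_vmwa_release(dev, &vf_vlan);
 *
 * where vlan_tag is a hypothetical VLAN tag taken from a flow rule.
 */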

/*
 * Create per ethernet device VLAN VM workaround context
 */
struct mlx5_nl_vlan_vmwa_context *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage workaround over PF. */
	if (!config->vf)
		return NULL;
	/* Check whether there is a desired virtual environment. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
	vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t));
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Cannot allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Cannot create Netlink socket"
			" for VLAN workaround context");
		rte_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}
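
/*
 * Lifetime note (sketch): the context returned by mlx5_vlan_vmwa_init() is
 * expected to be stored as priv->vmwa_context while the device is in use
 * and freed with mlx5_vlan_vmwa_exit() below; the exact call sites live
 * outside this file.
 */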

/*
 * Destroy per ethernet device VLAN VM workaround context
 */
void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
{
	unsigned int i;

	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	rte_free(vmwa);
}