1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
13 * Not needed by this file; included to work around the lack of off_t
14 * definition for mlx5dv.h with unpatched rdma-core versions.
16 #include <sys/types.h>
18 /* Verbs headers do not support -pedantic. */
20 #pragma GCC diagnostic ignored "-Wpedantic"
22 #include <infiniband/mlx5dv.h>
23 #include <infiniband/verbs.h>
25 #pragma GCC diagnostic error "-Wpedantic"
28 #include <rte_ethdev_driver.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_hypervisor.h>
33 #include <mlx5_glue.h>
34 #include <mlx5_devx_cmds.h>
38 #include "mlx5_autoconf.h"
39 #include "mlx5_rxtx.h"
40 #include "mlx5_utils.h"
43 * DPDK callback to configure a VLAN filter.
46 * Pointer to Ethernet device structure.
53 * 0 on success, a negative errno value otherwise and rte_errno is set.
56 mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
58 struct mlx5_priv *priv = dev->data->dev_private;
61 DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
62 dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
63 MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
64 for (i = 0; (i != priv->vlan_filter_n); ++i)
65 if (priv->vlan_filter[i] == vlan_id)
67 /* Check if there's room for another VLAN filter. */
68 if (i == RTE_DIM(priv->vlan_filter)) {
72 if (i < priv->vlan_filter_n) {
73 MLX5_ASSERT(priv->vlan_filter_n != 0);
74 /* Enabling an existing VLAN filter has no effect. */
77 /* Remove VLAN filter from list. */
78 --priv->vlan_filter_n;
79 memmove(&priv->vlan_filter[i],
80 &priv->vlan_filter[i + 1],
81 sizeof(priv->vlan_filter[i]) *
82 (priv->vlan_filter_n - i));
83 priv->vlan_filter[priv->vlan_filter_n] = 0;
85 MLX5_ASSERT(i == priv->vlan_filter_n);
86 /* Disabling an unknown VLAN filter has no effect. */
89 /* Add new VLAN filter. */
90 priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
91 ++priv->vlan_filter_n;
94 if (dev->data->dev_started)
95 return mlx5_traffic_restart(dev);
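/*
 * Illustrative usage sketch (not part of the driver): an application reaches
 * the callback above through the generic ethdev API.  The VLAN ID below and
 * the assumption that DEV_RX_OFFLOAD_VLAN_FILTER was enabled at configure
 * time are hypothetical.
 */
static __rte_unused int
mlx5_vlan_filter_usage_sketch(uint16_t port_id)
{
	/* Accept packets tagged with VLAN ID 100 on this port. */
	int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);

	if (ret)
		return ret;
	/* Remove the same ID later; disabling an unknown ID is a no-op. */
	return rte_eth_dev_vlan_filter(port_id, 100, 0);
}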
100 * Callback to set/reset VLAN stripping for a specific queue.
103 * Pointer to Ethernet device structure.
107 * Enable/disable VLAN stripping.
110 mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
112 struct mlx5_priv *priv = dev->data->dev_private;
113 struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
114 struct mlx5_rxq_ctrl *rxq_ctrl =
115 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
116 struct ibv_wq_attr mod;
117 uint16_t vlan_offloads =
118 (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
122 /* Validate hw support */
123 if (!priv->config.hw_vlan_strip) {
124 DRV_LOG(ERR, "port %u VLAN stripping is not supported",
128 /* Validate queue number */
129 if (queue >= priv->rxqs_n) {
130 DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
131 dev->data->port_id, queue);
134 DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %u queue %d",
135 dev->data->port_id, vlan_offloads, rxq->port_id, queue);
136 if (!rxq_ctrl->obj) {
137 /* Update related bits in RX queue. */
138 rxq->vlan_strip = !!on;
141 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
142 mod = (struct ibv_wq_attr){
143 .attr_mask = IBV_WQ_ATTR_FLAGS,
144 .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
145 .flags = vlan_offloads,
147 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
148 } else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
149 struct mlx5_devx_modify_rq_attr rq_attr;
151 memset(&rq_attr, 0, sizeof(rq_attr));
152 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
153 rq_attr.state = MLX5_RQC_STATE_RDY;
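/* VSD = VLAN Strip Disable: clear it to strip the VLAN, set it to keep it. */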
154 rq_attr.vsd = (on ? 0 : 1);
155 rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
156 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
159 DRV_LOG(ERR, "port %u failed to modify object %d stripping "
160 "mode: %s", dev->data->port_id,
161 rxq_ctrl->obj->type, strerror(rte_errno));
164 /* Update related bits in RX queue. */
165 rxq->vlan_strip = !!on;
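/*
 * Illustrative usage sketch (not part of the driver): per-queue control
 * through the public ethdev API ends up in the callback above.  Queue 0 is
 * an arbitrary example and the port is assumed to be configured and started.
 */
static __rte_unused int
mlx5_vlan_strip_queue_usage_sketch(uint16_t port_id)
{
	/* Enable CVLAN stripping on Rx queue 0 only. */
	int ret = rte_eth_dev_set_vlan_strip_on_queue(port_id, 0, 1);

	if (ret)
		return ret;
	/* ... and turn it back off for the same queue later. */
	return rte_eth_dev_set_vlan_strip_on_queue(port_id, 0, 0);
}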
169 * Callback to set/reset VLAN offloads for a port.
172 * Pointer to Ethernet device structure.
174 * VLAN offload bit mask.
177 * 0 on success, a negative errno value otherwise and rte_errno is set.
180 mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
182 struct mlx5_priv *priv = dev->data->dev_private;
185 if (mask & ETH_VLAN_STRIP_MASK) {
186 int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
187 DEV_RX_OFFLOAD_VLAN_STRIP);
189 if (!priv->config.hw_vlan_strip) {
190 DRV_LOG(ERR, "port %u VLAN stripping is not supported",
194 /* Run on every RX queue and set/reset VLAN stripping. */
195 for (i = 0; (i != priv->rxqs_n); i++)
196 mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
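/*
 * Illustrative usage sketch (not part of the driver):
 * rte_eth_dev_set_vlan_offload() computes the mask of changed offload
 * categories and calls the function above with ETH_VLAN_STRIP_MASK set
 * whenever the stripping bit differs from the current configuration.
 */
static __rte_unused int
mlx5_vlan_offload_usage_sketch(uint16_t port_id)
{
	int offloads = rte_eth_dev_get_vlan_offload(port_id);

	if (offloads < 0)
		return offloads;
	/* Request stripping on every configured Rx queue of the port. */
	return rte_eth_dev_set_vlan_offload(port_id,
					    offloads | ETH_VLAN_STRIP_OFFLOAD);
}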
202 * Release VLAN network device, created for VM workaround.
205 * Ethernet device object, Netlink context provider.
207 * Object representing the network device to release.
209 void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
210 struct mlx5_vf_vlan *vlan)
212 struct mlx5_priv *priv = dev->data->dev_private;
213 struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
214 struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
216 MLX5_ASSERT(vlan->created);
217 MLX5_ASSERT(priv->vmwa_context);
218 if (!vlan->created || !vmwa)
221 MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
222 if (--vlan_dev[vlan->tag].refcnt == 0 &&
223 vlan_dev[vlan->tag].ifindex) {
224 mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
225 vlan_dev[vlan->tag].ifindex = 0;
230 * Acquire VLAN interface with specified tag for VM workaround.
233 * Ethernet device object, Netlink context provider.
235 * Object representing the network device to acquire.
237 void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
238 struct mlx5_vf_vlan *vlan)
240 struct mlx5_priv *priv = dev->data->dev_private;
241 struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
242 struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
244 MLX5_ASSERT(!vlan->created);
245 MLX5_ASSERT(priv->vmwa_context);
246 if (vlan->created || !vmwa)
248 if (vlan_dev[vlan->tag].refcnt == 0) {
249 MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
250 vlan_dev[vlan->tag].ifindex =
251 mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
254 if (vlan_dev[vlan->tag].ifindex) {
255 vlan_dev[vlan->tag].refcnt++;
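/*
 * Illustrative sketch (not part of the driver) of how a user of the
 * workaround, e.g. the flow engine, pairs the two calls above.  The tag
 * value is hypothetical and the port is assumed to be an mlx5 VF with the
 * workaround context created.
 */
static __rte_unused void
mlx5_vlan_vmwa_usage_sketch(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_vf_vlan vf_vlan = { .tag = 100 };

	if (!priv->vmwa_context)
		return; /* Workaround is not engaged for this device. */
	/* Reference (and create on first use) the VLAN interface for tag 100. */
	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
	if (!vf_vlan.created)
		return; /* Netlink creation failed; nothing to undo. */
	/* ... offloaded rules relying on the kernel VLAN device ... */
	/* Drop the reference; the last user deletes the interface. */
	mlx5_vlan_vmwa_release(dev, &vf_vlan);
}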
261  * Create the per-ethernet-device VLAN VM workaround context.
263 struct mlx5_nl_vlan_vmwa_context *
264 mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
266 struct mlx5_priv *priv = dev->data->dev_private;
267 struct mlx5_dev_config *config = &priv->config;
268 struct mlx5_nl_vlan_vmwa_context *vmwa;
269 enum rte_hypervisor hv_type;
271 /* Do not engage workaround over PF. */
274 /* Check whether the desired virtual environment is present. */
275 hv_type = rte_hypervisor_get();
277 case RTE_HYPERVISOR_UNKNOWN:
278 case RTE_HYPERVISOR_VMWARE:
280 * The "white list" of configurations
281 * to engage the workaround.
286 * The configuration is not found in the "white list".
287 * We should not engage the VLAN workaround.
291 vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t));
294 "Cannot allocate memory"
295 " for VLAN workaround context");
298 vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
299 if (vmwa->nl_socket < 0) {
301 "Cannot create Netlink socket"
302 " for VLAN workaround context");
306 vmwa->vf_ifindex = ifindex;
307 /* Cleanup for existing VLAN devices. */
312  * Destroy the per-ethernet-device VLAN VM workaround context.
314 void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
318 /* Delete all remaining VLAN devices. */
319 for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
320 if (vmwa->vlan_dev[i].ifindex)
321 mlx5_nl_vlan_vmwa_delete(vmwa,
322 vmwa->vlan_dev[i].ifindex);
324 if (vmwa->nl_socket >= 0)
325 close(vmwa->nl_socket);
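/*
 * Illustrative lifecycle sketch (not part of the driver): the context is
 * created while spawning a VF port and destroyed when the port is closed.
 * "ifindex" stands for the VF's own kernel interface index; a NULL return
 * from mlx5_vlan_vmwa_init() simply means the workaround is not needed.
 */
static __rte_unused void
mlx5_vlan_vmwa_lifecycle_sketch(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	priv->vmwa_context = mlx5_vlan_vmwa_init(dev, ifindex);
	/* ... port lifetime ... */
	if (priv->vmwa_context) {
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
		priv->vmwa_context = NULL;
	}
}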