1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
14 * Not needed by this file; included to work around the lack of off_t
15 * definition for mlx5dv.h with unpatched rdma-core versions.
17 #include <sys/types.h>
19 /* Verbs headers do not support -pedantic. */
21 #pragma GCC diagnostic ignored "-Wpedantic"
23 #include <infiniband/mlx5dv.h>
24 #include <infiniband/verbs.h>
26 #pragma GCC diagnostic error "-Wpedantic"
29 #include <rte_ethdev_driver.h>
30 #include <rte_common.h>
31 #include <rte_malloc.h>
32 #include <rte_hypervisor.h>
34 #include <mlx5_glue.h>
35 #include <mlx5_devx_cmds.h>
38 #include "mlx5_autoconf.h"
39 #include "mlx5_rxtx.h"
41 #include "mlx5_utils.h"
44 * DPDK callback to configure a VLAN filter.
47 * Pointer to Ethernet device structure.
54 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Maintain priv->vlan_filter[] as a compact array of enabled VLAN IDs:
 * "on" appends vlan_id if absent, "off" removes it if present, and when
 * the device is already started the traffic flows are restarted so the
 * updated filter list takes effect in hardware.
 * NOTE(review): several physical lines (braces, break/return statements,
 * the out-of-room error path) are elided in this view; comments below
 * describe only the statements that are visible.
 */
57 mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
59 struct mlx5_priv *priv = dev->data->dev_private;
62 DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
63 dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
/* The filter count must never exceed the fixed-size array. */
64 assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
/* Linear scan for an existing entry matching vlan_id; i is left at the
 * match position, or at vlan_filter_n if no match was found. */
65 for (i = 0; (i != priv->vlan_filter_n); ++i)
66 if (priv->vlan_filter[i] == vlan_id)
68 /* Check if there's room for another VLAN filter. */
69 if (i == RTE_DIM(priv->vlan_filter)) {
/* i < vlan_filter_n means the ID is already present in the list. */
73 if (i < priv->vlan_filter_n) {
74 assert(priv->vlan_filter_n != 0);
75 /* Enabling an existing VLAN filter has no effect. */
78 /* Remove VLAN filter from list. */
79 --priv->vlan_filter_n;
/* Shift the tail of the array left by one to close the gap at i. */
80 memmove(&priv->vlan_filter[i],
81 &priv->vlan_filter[i + 1],
82 sizeof(priv->vlan_filter[i]) *
83 (priv->vlan_filter_n - i));
/* Clear the now-unused trailing slot. */
84 priv->vlan_filter[priv->vlan_filter_n] = 0;
86 assert(i == priv->vlan_filter_n);
87 /* Disabling an unknown VLAN filter has no effect. */
90 /* Add new VLAN filter. */
91 priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
92 ++priv->vlan_filter_n;
/* Re-apply traffic rules only on a started port; otherwise the new
 * filter list is picked up when the port is started later. */
95 if (dev->data->dev_started)
96 return mlx5_traffic_restart(dev);
101 * Callback to set/reset VLAN stripping for a specific queue.
104 * Pointer to Ethernet device structure.
108 * Enable/disable VLAN stripping.
/*
 * Enable/disable CVLAN stripping on a single Rx queue. Validates HW
 * capability and the queue index, then programs the queue object through
 * whichever backend created it (Verbs WQ modify or DevX RQ modify), and
 * finally mirrors the setting into the software rxq state.
 * NOTE(review): error-return lines and closing braces are elided in this
 * view; comments describe only the visible statements.
 */
111 mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
113 struct mlx5_priv *priv = dev->data->dev_private;
114 struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
/* Recover the controlling structure embedding this rxq data. */
115 struct mlx5_rxq_ctrl *rxq_ctrl =
116 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
117 struct ibv_wq_attr mod;
/* Translate the on/off request into Verbs WQ flag bits. */
118 uint16_t vlan_offloads =
119 (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
123 /* Validate hw support */
124 if (!priv->config.hw_vlan_strip) {
125 DRV_LOG(ERR, "port %u VLAN stripping is not supported",
129 /* Validate queue number */
130 if (queue >= priv->rxqs_n) {
131 DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
132 dev->data->port_id, queue);
/* NOTE(review): format string is missing a space — "%uqueue" prints the
 * port number and the word "queue" fused together; should be "%u queue". */
135 DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %uqueue %d",
136 dev->data->port_id, vlan_offloads, rxq->port_id, queue);
/* No HW object yet: only record the setting in the SW queue state. */
137 if (!rxq_ctrl->obj) {
138 /* Update related bits in RX queue. */
139 rxq->vlan_strip = !!on;
/* Verbs-backed queue: modify the WQ's CVLAN-stripping flag. */
142 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
143 mod = (struct ibv_wq_attr){
144 .attr_mask = IBV_WQ_ATTR_FLAGS,
145 .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
146 .flags = vlan_offloads,
148 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
/* DevX-backed queue: modify the RQ's VSD (VLAN strip disable) bit. */
149 } else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
150 struct mlx5_devx_modify_rq_attr rq_attr;
152 memset(&rq_attr, 0, sizeof(rq_attr));
153 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
154 rq_attr.state = MLX5_RQC_STATE_RDY;
/* VSD is inverted relative to "on": 0 enables stripping. */
155 rq_attr.vsd = (on ? 0 : 1);
156 rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
157 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
160 DRV_LOG(ERR, "port %u failed to modify object %d stripping "
161 "mode: %s", dev->data->port_id,
162 rxq_ctrl->obj->type, strerror(rte_errno));
165 /* Update related bits in RX queue. */
166 rxq->vlan_strip = !!on;
170 * Callback to set/reset VLAN offloads for a port.
173 * Pointer to Ethernet device structure.
175 * VLAN offload bit mask.
178 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Port-level VLAN offload callback: when the STRIP mask bit is set,
 * derive the requested stripping state from the current Rx offload
 * configuration and apply it to every Rx queue via
 * mlx5_vlan_strip_queue_set().
 * NOTE(review): the error-return path of the !hw_vlan_strip branch and
 * the function's closing lines are elided in this view.
 */
181 mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
183 struct mlx5_priv *priv = dev->data->dev_private;
186 if (mask & ETH_VLAN_STRIP_MASK) {
/* Requested state comes from the device's configured Rx offloads. */
187 int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
188 DEV_RX_OFFLOAD_VLAN_STRIP);
/* Reject the request if the hardware cannot strip VLAN tags. */
190 if (!priv->config.hw_vlan_strip) {
191 DRV_LOG(ERR, "port %u VLAN stripping is not supported",
195 /* Run on every RX queue and set/reset VLAN stripping. */
196 for (i = 0; (i != priv->rxqs_n); i++)
197 mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
203 * Release VLAN network device, created for VM workaround.
206 * Ethernet device object, Netlink context provider.
208 * Object representing the network device to release.
/*
 * Drop one reference on the VM-workaround VLAN netdev associated with
 * vlan->tag; when the last reference goes away, delete the kernel VLAN
 * interface via Netlink and clear its cached ifindex.
 * NOTE(review): the early-return statement after the created/vmwa guard
 * and the lines clearing vlan->created are elided in this view.
 */
210 void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
211 struct mlx5_vf_vlan *vlan)
213 struct mlx5_priv *priv = dev->data->dev_private;
214 struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
/* Per-tag refcount/ifindex table, indexed directly by VLAN tag. */
215 struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
/* Caller must only release a VLAN it previously acquired. */
217 assert(vlan->created);
218 assert(priv->vmwa_context);
/* Defensive guard for release builds where assert is compiled out. */
219 if (!vlan->created || !vmwa)
222 assert(vlan_dev[vlan->tag].refcnt);
/* Last user gone: remove the kernel VLAN interface. */
223 if (--vlan_dev[vlan->tag].refcnt == 0 &&
224 vlan_dev[vlan->tag].ifindex) {
225 mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
226 vlan_dev[vlan->tag].ifindex = 0;
231 * Acquire VLAN interface with specified tag for VM workaround.
234 * Ethernet device object, Netlink context provider.
236 * Object representing the network device to acquire.
/*
 * Take a reference on the VM-workaround VLAN netdev for vlan->tag,
 * creating the kernel VLAN interface over the VF via Netlink on first
 * use (refcount 0 -> 1).
 * NOTE(review): the early-return after the created/vmwa guard, the tail
 * of the mlx5_nl_vlan_vmwa_create() argument list, and the line setting
 * vlan->created are elided in this view.
 */
238 void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
239 struct mlx5_vf_vlan *vlan)
241 struct mlx5_priv *priv = dev->data->dev_private;
242 struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
/* Per-tag refcount/ifindex table, indexed directly by VLAN tag. */
243 struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
/* Double-acquire is a caller bug. */
245 assert(!vlan->created);
246 assert(priv->vmwa_context);
/* Defensive guard for release builds where assert is compiled out. */
247 if (vlan->created || !vmwa)
/* First user of this tag: create the kernel VLAN interface on the VF. */
249 if (vlan_dev[vlan->tag].refcnt == 0) {
250 assert(!vlan_dev[vlan->tag].ifindex);
251 vlan_dev[vlan->tag].ifindex =
252 mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
/* Only count the reference if the interface actually exists. */
255 if (vlan_dev[vlan->tag].ifindex) {
256 vlan_dev[vlan->tag].refcnt++;
262 * Create per ethernet device VLAN VM workaround context
/*
 * Allocate and initialize the per-device VLAN VM-workaround context:
 * engage only for VFs running under known hypervisors, then allocate the
 * context, open a NETLINK_ROUTE socket, and record the VF ifindex.
 * NOTE(review): this block is heavily elided in this view — the PF-check
 * condition, the switch(hv_type) statement itself, all return statements
 * and the error-cleanup path (freeing vmwa when the socket fails) are
 * not visible; comments describe only the visible statements.
 */
264 struct mlx5_nl_vlan_vmwa_context *
265 mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
267 struct mlx5_priv *priv = dev->data->dev_private;
268 struct mlx5_dev_config *config = &priv->config;
269 struct mlx5_nl_vlan_vmwa_context *vmwa;
270 enum rte_hypervisor hv_type;
272 /* Do not engage workaround over PF. */
275 /* Check whether there is desired virtual environment */
276 hv_type = rte_hypervisor_get();
/* Hypervisors for which the workaround is engaged. */
278 case RTE_HYPERVISOR_UNKNOWN:
279 case RTE_HYPERVISOR_VMWARE:
281 * The "white list" of configurations
282 * to engage the workaround.
287 * The configuration is not found in the "white list".
288 * We should not engage the VLAN workaround.
/* Zeroed allocation, aligned to uint32_t; tag name is the function. */
292 vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t))
295 "Can not allocate memory"
296 " for VLAN workaround context");
/* Netlink socket used for VLAN interface create/delete requests. */
299 vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
300 if (vmwa->nl_socket < 0) {
302 "Can not create Netlink socket"
303 " for VLAN workaround context");
/* Remember the VF ifindex the VLAN interfaces will be stacked on. */
307 vmwa->vf_ifindex = ifindex;
308 /* Cleanup for existing VLAN devices. */
308 /* Cleanup for existing VLAN devices. */
313 * Destroy per ethernet device VLAN VM workaround context
/*
 * Tear down the VLAN VM-workaround context: delete every kernel VLAN
 * interface still recorded in the per-tag table, then close the Netlink
 * socket. NOTE(review): the loop-variable declaration, braces, and the
 * final rte_free(vmwa) are elided in this view.
 */
315 void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
319 /* Delete all remaining VLAN devices. */
320 for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
321 if (vmwa->vlan_dev[i].ifindex)
322 mlx5_nl_vlan_vmwa_delete(vmwa,
323 vmwa->vlan_dev[i].ifindex);
/* Only close a socket that was successfully opened (>= 0). */
325 if (vmwa->nl_socket >= 0)
326 close(vmwa->nl_socket);