/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <errno.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
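
/*
 * Note: hash_rxq_special_flow_enable_vlan() below relies on the
 * FLOW_ATTR_SPEC_ETH() macro declared in mlx5_rxtx.h. Inferred from its
 * usage in this file (a sketch, not the authoritative definition), it
 * declares a buffer named after its first argument, laid out as an
 * ibv_flow_attr immediately followed by an Ethernet flow specification:
 *
 *   struct flow_attr_spec_eth {
 *           struct ibv_flow_attr attr;
 *           struct ibv_flow_spec_eth spec;
 *   };
 *
 * libibverbs expects flow specifications to follow ibv_flow_attr
 * back-to-back in memory, hence the padding assertion in that function.
 */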
/* Initialization data for special flows. */
static const struct special_flow_init special_flow_init[] = {
	[HASH_RXQ_FLOW_TYPE_BROADCAST] = {
		/* Ethernet broadcast address. */
		.dst_mac_val = "\xff\xff\xff\xff\xff\xff",
		.dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
	[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
		/* 33:33:xx:xx:xx:xx is the IPv6 multicast MAC prefix. */
		.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
		.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
};
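
/*
 * For reference, struct special_flow_init is declared in mlx5_rxtx.h.
 * Inferred from its usage in this file (a sketch, not the authoritative
 * definition), each entry is a template for one special flow type:
 *
 *   struct special_flow_init {
 *           uint8_t dst_mac_val[6];   destination MAC to match
 *           uint8_t dst_mac_mask[6];  mask applied to dst_mac_val
 *           unsigned int hash_types;  bitmask of hash RX queue types
 *                                     the flow is relevant for
 *           unsigned int per_vlan:1;  nonzero: one flow per VLAN filter
 *   };
 */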
/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
				  enum hash_rxq_flow_type flow_type,
				  unsigned int vlan_index)
{
	struct priv *priv = hash_rxq->priv;
	struct ibv_flow *flow;
	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
	struct ibv_flow_attr *attr = &data->attr;
	struct ibv_flow_spec_eth *spec = &data->spec;
	const uint8_t *mac;
	const uint8_t *mask;
	unsigned int vlan_enabled = (priv->vlan_filter_n &&
				     special_flow_init[flow_type].per_vlan);
	unsigned int vlan_id = priv->vlan_filter[vlan_index];
	/* Check if flow is relevant for this hash_rxq. */
	if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
		return 0;
	/* Check if flow already exists. */
	if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
		return 0;

	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
	/* The first specification must be Ethernet. */
	assert(spec->type == IBV_FLOW_SPEC_ETH);
	assert(spec->size == sizeof(*spec));
	mac = special_flow_init[flow_type].dst_mac_val;
	mask = special_flow_init[flow_type].dst_mac_mask;
	/* Fill in the Ethernet specification in place. */
	*spec = (struct ibv_flow_spec_eth){
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				mac[0], mac[1], mac[2],
				mac[3], mac[4], mac[5],
			},
			.vlan_tag = (vlan_enabled ?
				     rte_cpu_to_be_16(vlan_id) :
				     0),
		},
		.mask = {
			.dst_mac = {
				mask[0], mask[1], mask[2],
				mask[3], mask[4], mask[5],
			},
			.vlan_tag = (vlan_enabled ?
				     rte_cpu_to_be_16(0xfff) :
				     0),
		},
	};

	errno = 0;
	flow = ibv_create_flow(hash_rxq->qp, attr);
	if (flow == NULL) {
		/* It's not clear whether errno is always set in this case. */
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)hash_rxq, errno,
		      (errno ? strerror(errno) : "Unknown error"));
		if (errno)
			return errno;
		return EINVAL;
	}
	hash_rxq->special_flow[flow_type][vlan_index] = flow;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      vlan_id, vlan_index);
	return 0;
}

/**
 * Disable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 */
static void
hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
				   enum hash_rxq_flow_type flow_type,
				   unsigned int vlan_index)
{
	struct ibv_flow *flow =
		hash_rxq->special_flow[flow_type][vlan_index];

	if (flow == NULL)
		return;
	claim_zero(ibv_destroy_flow(flow));
	hash_rxq->special_flow[flow_type][vlan_index] = NULL;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
}

/**
 * Enable a special flow in a hash RX queue.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
			     enum hash_rxq_flow_type flow_type)
{
	struct priv *priv = hash_rxq->priv;
	unsigned int i = 0;
	int ret;

	assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
	assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
	       RTE_DIM(priv->vlan_filter));
	/*
	 * Add a special flow for each VLAN filter when relevant. The body
	 * runs at least once with VLAN index 0, even when the flow type is
	 * not per-VLAN or no VLAN filter is configured.
	 */
	do {
		ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
		if (ret) {
			/* Failure, rollback. */
			while (i != 0)
				hash_rxq_special_flow_disable_vlan(hash_rxq,
								   flow_type,
								   --i);
			return ret;
		}
	} while (special_flow_init[flow_type].per_vlan &&
		 ++i < priv->vlan_filter_n);
	return 0;
}

/**
 * Disable a special flow in a hash RX queue.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 */
static void
hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
			      enum hash_rxq_flow_type flow_type)
{
	unsigned int i;

	assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
	for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
		hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
}

/**
 * Enable a special flow in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
	unsigned int i;

	if (!priv_allow_flow_type(priv, flow_type))
		return 0;
	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
		int ret;

		ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
		if (!ret)
			continue;
		/* Failure, rollback. */
		while (i != 0) {
			hash_rxq = &(*priv->hash_rxqs)[--i];
			hash_rxq_special_flow_disable(hash_rxq, flow_type);
		}
		return ret;
	}
	return 0;
}

/**
 * Disable a special flow in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 * @param flow_type
 *   Special flow type.
 */
void
priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
	unsigned int i;

	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];

		hash_rxq_special_flow_disable(hash_rxq, flow_type);
	}
}

/**
 * Enable all special flows in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_special_flow_enable_all(struct priv *priv)
{
	enum hash_rxq_flow_type flow_type;
	int ret;

	for (flow_type = HASH_RXQ_FLOW_TYPE_BROADCAST;
	     flow_type != HASH_RXQ_FLOW_TYPE_MAC;
	     ++flow_type) {
		ret = priv_special_flow_enable(priv, flow_type);
		if (!ret)
			continue;
		/* Failure, rollback. */
		while (flow_type != HASH_RXQ_FLOW_TYPE_BROADCAST)
			priv_special_flow_disable(priv, --flow_type);
		return ret;
	}
	return 0;
}

/**
 * Disable all special flows in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 */
void
priv_special_flow_disable_all(struct priv *priv)
{
	enum hash_rxq_flow_type flow_type;

	for (flow_type = HASH_RXQ_FLOW_TYPE_BROADCAST;
	     flow_type != HASH_RXQ_FLOW_TYPE_MAC;
	     ++flow_type)
		priv_special_flow_disable(priv, flow_type);
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
	/* Zeroed specification and mask: match every Ethernet frame. */
	struct rte_flow_item_eth eth = {
		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
		.type = 0,
	};

	if (mlx5_is_secondary())
		return;
	dev->data->promiscuous = 1;
	claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
}
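
/*
 * mlx5_ctrl_flow() installs (or removes) a control flow rule built from
 * the Ethernet item passed as both specification and mask. With both
 * fully zeroed, as above, the rule matches all traffic, which is what
 * promiscuous mode amounts to. As the call sites here suggest, the last
 * argument toggles the rule on (1) or off (0); the allmulticast
 * callbacks below reuse the same helper with only the Ethernet
 * multicast bit set in value and mask.
 */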

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth eth = {
		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
		.type = 0,
	};

	if (mlx5_is_secondary())
		return;
	dev->data->promiscuous = 0;
	claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
}

/**
 * DPDK callback to enable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
	/* The 0x01 in the first octet is the Ethernet multicast bit. */
	struct rte_flow_item_eth eth = {
		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
		.src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
		.type = 0,
	};

	if (mlx5_is_secondary())
		return;
	dev->data->all_multicast = 1;
	if (dev->data->dev_started)
		claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
}

/**
 * DPDK callback to disable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth eth = {
		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
		.src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
		.type = 0,
	};

	if (mlx5_is_secondary())
		return;
	dev->data->all_multicast = 0;
	if (dev->data->dev_started)
		claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
}
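
/*
 * None of the four callbacks above is invoked directly; they are
 * registered in the device operations table in mlx5.c. A sketch, on the
 * assumption that the table follows the standard eth_dev_ops layout:
 *
 *   static const struct eth_dev_ops mlx5_dev_ops = {
 *           ...
 *           .promiscuous_enable = mlx5_promiscuous_enable,
 *           .promiscuous_disable = mlx5_promiscuous_disable,
 *           .allmulticast_enable = mlx5_allmulticast_enable,
 *           .allmulticast_disable = mlx5_allmulticast_disable,
 *           ...
 *   };
 */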