/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Flow API operations for the mlx4 driver.
 */

#include <arpa/inet.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Verbs (IBV) RSS hash field combinations supported by mlx4. */
#define MLX4_IBV_IPV4_HF ( \
	IBV_RX_HASH_SRC_IPV4 | \
#define MLX4_IBV_IPV6_HF ( \
	IBV_RX_HASH_SRC_IPV6 | \
#define MLX4_IBV_TCP_HF ( \
	IBV_RX_HASH_SRC_PORT_TCP | \
	IBV_RX_HASH_DST_PORT_TCP)
#define MLX4_IBV_UDP_HF ( \
	IBV_RX_HASH_SRC_PORT_UDP | \
	IBV_RX_HASH_DST_PORT_UDP)

/** DPDK RSS hash type combinations supported by mlx4. */
#define MLX4_RSS_IPV4_HF ( \
	ETH_RSS_NONFRAG_IPV4_OTHER)
#define MLX4_RSS_IPV6_HF ( \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
#define MLX4_RSS_IPV4_TCP_HF ( \
	ETH_RSS_NONFRAG_IPV4_TCP)
#define MLX4_RSS_IPV6_TCP_HF ( \
	ETH_RSS_NONFRAG_IPV6_TCP | \
#define MLX4_RSS_IPV4_UDP_HF ( \
	ETH_RSS_NONFRAG_IPV4_UDP)
#define MLX4_RSS_IPV6_UDP_HF ( \
	ETH_RSS_NONFRAG_IPV6_UDP | \

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \

/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
	/** Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/** Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/** Size in bytes for @p mask_support and @p mask_default. */
	const unsigned int mask_sz;
	/** Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;

/** Shared resources for drop flow rules. */
struct mlx4_drop {
	struct ibv_qp *qp; /**< QP target. */
	struct ibv_cq *cq; /**< CQ associated with above QP. */
	struct priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count. */

/**
 * Convert DPDK RSS hash types to their Verbs equivalent.
 *
 * This function returns the supported (default) set when @p types has
 *
 * @param priv
 *   Pointer to private structure.
 * @param types
 *   Hash types in DPDK format (see struct rte_eth_rss_conf).
 *
 * @return
 *   A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
 *   otherwise and rte_errno is set.
 */
static uint64_t
mlx4_conv_rss_types(struct priv *priv, uint64_t types)
{
	enum { IPV4, IPV6, TCP, UDP, };
	static const uint64_t in[] = {
		[IPV4] = (ETH_RSS_IPV4 |
			  ETH_RSS_NONFRAG_IPV4_TCP |
			  ETH_RSS_NONFRAG_IPV4_UDP |
			  ETH_RSS_NONFRAG_IPV4_OTHER),
		[IPV6] = (ETH_RSS_IPV6 |
			  ETH_RSS_NONFRAG_IPV6_TCP |
			  ETH_RSS_NONFRAG_IPV6_UDP |
			  ETH_RSS_NONFRAG_IPV6_OTHER |
			  ETH_RSS_IPV6_TCP_EX |
			  ETH_RSS_IPV6_UDP_EX),
		[TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
			 ETH_RSS_NONFRAG_IPV6_TCP |
			 ETH_RSS_IPV6_TCP_EX),
		[UDP] = (ETH_RSS_NONFRAG_IPV4_UDP |
			 ETH_RSS_NONFRAG_IPV6_UDP |
			 ETH_RSS_IPV6_UDP_EX),
	static const uint64_t out[RTE_DIM(in)] = {
		[IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
		[IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
		[TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
		[UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
		return priv->hw_rss_sup;
	for (i = 0; i != RTE_DIM(in); ++i)
			seen |= types & in[i];
	if ((conv & priv->hw_rss_sup) == conv && !(types & ~seen))
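
/*
 * Illustrative sketch (not part of the driver): assuming the device
 * reports the relevant fields in priv->hw_rss_sup, requesting TCPv4 RSS
 * combines the IPv4 and TCP Verbs hash fields:
 *
 *	uint64_t fields =
 *		mlx4_conv_rss_types(priv, ETH_RSS_NONFRAG_IPV4_TCP);
 *	// == IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *	//    IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP
 */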

/**
 * Convert Verbs RSS types to their DPDK equivalents.
 *
 * This function returns a group of DPDK RSS types given their equivalent group
 * For example, both the source and destination IPv4 Verbs types are converted
 * into their equivalent RSS group types. If either of these Verbs types
 * appeared on its own, no conversion would take place.
 *
 * @param types
 *   RSS hash types in Verbs format.
 *
 * @return
 *   DPDK RSS hash fields supported by mlx4.
 */
static uint64_t
mlx4_ibv_to_rss_types(uint64_t types)
{
	enum { IPV4, IPV6, IPV4_TCP, IPV6_TCP, IPV4_UDP, IPV6_UDP };
	static const uint64_t in[] = {
		[IPV4] = MLX4_IBV_IPV4_HF,
		[IPV6] = MLX4_IBV_IPV6_HF,
		[IPV4_TCP] = MLX4_IBV_IPV4_HF | MLX4_IBV_TCP_HF,
		[IPV6_TCP] = MLX4_IBV_IPV6_HF | MLX4_IBV_TCP_HF,
		[IPV4_UDP] = MLX4_IBV_IPV4_HF | MLX4_IBV_UDP_HF,
		[IPV6_UDP] = MLX4_IBV_IPV6_HF | MLX4_IBV_UDP_HF,
	static const uint64_t out[RTE_DIM(in)] = {
		[IPV4] = MLX4_RSS_IPV4_HF,
		[IPV6] = MLX4_RSS_IPV6_HF,
		[IPV4_TCP] = MLX4_RSS_IPV4_HF | MLX4_RSS_IPV4_TCP_HF,
		[IPV6_TCP] = MLX4_RSS_IPV6_HF | MLX4_RSS_IPV6_TCP_HF,
		[IPV4_UDP] = MLX4_RSS_IPV4_HF | MLX4_RSS_IPV4_UDP_HF,
		[IPV6_UDP] = MLX4_RSS_IPV6_HF | MLX4_RSS_IPV6_UDP_HF,
	for (i = 0; i != RTE_DIM(in); ++i)
		if ((types & in[i]) == in[i])
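
/*
 * Illustrative sketch: a Verbs mask combining the IPv4 and UDP hash
 * fields maps back to both the IPv4 and IPv4/UDP DPDK groups:
 *
 *	uint64_t types =
 *		mlx4_ibv_to_rss_types(MLX4_IBV_IPV4_HF | MLX4_IBV_UDP_HF);
 *	// == MLX4_RSS_IPV4_HF | MLX4_RSS_IPV4_UDP_HF
 */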

/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. the rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
		uint32_t sum_dst = 0;
		uint32_t sum_src = 0;

		for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
			sum_dst += mask->dst.addr_bytes[i];
			sum_src += mask->src.addr_bytes[i];
			msg = "mlx4 does not support source MAC matching";
		} else if (!sum_dst) {
		} else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
			if (!(spec->dst.addr_bytes[0] & 1)) {
				msg = "mlx4 does not support the explicit"
					" exclusion of all multicast traffic";
		} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
			msg = "mlx4 does not support matching partial"
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
	if (flow->allmulti) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
	++flow->ibv_attr->num_of_specs;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*eth),
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
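
/*
 * Application-side sketch (public rte_flow API, not driver code): the two
 * non-trivial Ethernet matches this PMD accepts are an exact destination
 * MAC (full ff:ff:ff:ff:ff:ff mask) and the all-multicast catch-all:
 *
 *	struct rte_flow_item_eth spec = {
 *		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
 *	};
 *	struct rte_flow_item_eth mask = {
 *		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
 *	};
 *	// With both spec->dst and mask->dst set to 01:00:00:00:00:00,
 *	// the rule matches all multicast traffic.
 */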

/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;

	if (!mask || !mask->tci) {
		msg = "mlx4 cannot match all VLAN traffic while excluding"
			" non-VLAN traffic, TCI VID must be specified";
	if (mask->tci != RTE_BE16(0x0fff)) {
		msg = "mlx4 does not support partial TCI VID matching";
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
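
/*
 * Application-side sketch: the only accepted VLAN match is an exact VID,
 * i.e. a full 12-bit TCI VID mask, e.g. VLAN 42:
 *
 *	struct rte_flow_item_vlan spec = { .tci = RTE_BE16(42) };
 *	struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 */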

/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_ipv4 *ipv4;
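	/*
	 * The "(x + 1) > 1" tests below accept exactly two mask values for
	 * a field: 0 (field ignored) and all ones (exact match); anything
	 * in between is a partial mask and gets rejected.
	 */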
	    ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
	     (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
		msg = "mlx4 does not support matching partial IPv4 fields";
	++flow->ibv_attr->num_of_specs;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = sizeof(*ipv4),
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,

/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 * - Due to a HW/FW limitation, flow rule priority is not taken into account
 *   when matching UDP destination ports; doing so is therefore only
 *   supported at the highest priority level (0).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *udp;
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial UDP fields";
	if (mask && mask->hdr.dst_port && flow->priority) {
		msg = "combining UDP destination port matching with a nonzero"
			" priority level is not supported";
	++flow->ibv_attr->num_of_specs;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(*udp),
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,

/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *tcp;
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial TCP fields";
	++flow->ibv_attr->num_of_specs;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(*tcp),
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,

/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
	/* No spec, no mask, no problem. */
		(const uint8_t *)item->mask :
		(const uint8_t *)proc->mask_default;
	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
		    ((const uint8_t *)proc->mask_support)[i])
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported field found in \"mask\"");
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i]))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 "range between \"spec\" and \"last\""
				 " is larger than \"mask\"");

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_eth){
			/* Only destination MAC can be matched. */
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.mask_default = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.merge = mlx4_flow_merge_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_vlan){
			/* Only TCI VID matching is supported. */
			.tci = RTE_BE16(0x0fff),
		.mask_default = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.merge = mlx4_flow_merge_vlan,
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask_support = &(const struct rte_flow_item_ipv4){
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = mlx4_flow_merge_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = mlx4_flow_merge_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = mlx4_flow_merge_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
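
/*
 * The graph above yields the following supported pattern chains, each of
 * which may also terminate early:
 *
 *	ETH [ -> VLAN ] -> IPV4 -> UDP
 *	ETH [ -> VLAN ] -> IPV4 -> TCP
 */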

/**
 * Make sure a flow rule is supported and initialize the associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after the validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct rte_flow **addr)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc;
	struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
	struct rte_flow *flow = &temp;
	const char *msg = NULL;

		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			 NULL, "groups are not supported");
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			 NULL, "maximum priority level is "
			 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			 NULL, "egress is not supported");
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			 NULL, "transfer is not supported");
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			 NULL, "only ingress is supported");
	proc = mlx4_flow_proc_item_list;
	flow->priority = attr->priority;
	/* Go over pattern. */
	for (item = pattern; item->type; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
		if (flow->promisc || flow->allmulti) {
			msg = "mlx4 does not support additional matching"
				" criteria combined with indiscriminate"
				" matching on Ethernet headers";
			goto exit_item_not_supported;
		for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
			goto exit_item_not_supported;
		/*
		 * Perform basic sanity checks only once, while the handle is
		err = mlx4_flow_item_check(item, proc, error);
		err = proc->merge(flow, item, proc, error);
		flow->ibv_attr_size += proc->dst_sz;
	/* Go over actions list. */
	for (action = actions; action->type; ++action) {
		/* This one may appear anywhere multiple times. */
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
		/* Fate-deciding actions may appear exactly once. */
			msg = "cannot combine several fate-deciding actions,"
				" choose between DROP, QUEUE or RSS";
			goto exit_action_not_supported;
		switch (action->type) {
			const struct rte_flow_action_queue *queue;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;
			uint32_t rss_key_len;
		case RTE_FLOW_ACTION_TYPE_DROP:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			queue = action->conf;
			if (queue->index >= priv->dev->data->nb_rx_queues) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			flow->rss = mlx4_rss_get
				(priv, 0, mlx4_rss_hash_key_default, 1,
				msg = "not enough resources for additional"
					" single-queue RSS context";
				goto exit_action_not_supported;
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* Default RSS configuration if none is provided. */
				rss_key_len = rss->key_len;
				rss_key = mlx4_rss_hash_key_default;
				rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
			for (i = 0; i < rss->queue_num; ++i)
				    priv->dev->data->nb_rx_queues)
			if (i != rss->queue_num) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			if (!rte_is_power_of_2(rss->queue_num)) {
				msg = "for RSS, mlx4 requires the number of"
					" queues to be a power of two";
				goto exit_action_not_supported;
			if (rss_key_len != sizeof(flow->rss->key)) {
				msg = "mlx4 supports exactly one RSS hash key"
					MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
				goto exit_action_not_supported;
			for (i = 1; i < rss->queue_num; ++i)
				if (rss->queue[i] - rss->queue[i - 1] != 1)
			if (i != rss->queue_num) {
				msg = "mlx4 requires RSS contexts to use"
					" consecutive queue indices only";
				goto exit_action_not_supported;
			if (rss->queue[0] % rss->queue_num) {
				msg = "mlx4 requires the first queue of an RSS"
					" context to be aligned on a multiple"
					" of the context size";
				goto exit_action_not_supported;
			    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
				msg = "the only supported RSS hash function"
				goto exit_action_not_supported;
				msg = "a nonzero RSS encapsulation level is"
				goto exit_action_not_supported;
			fields = mlx4_conv_rss_types(priv, rss->types);
			if (fields == (uint64_t)-1 && rte_errno) {
				msg = "unsupported RSS hash type requested";
				goto exit_action_not_supported;
			flow->rss = mlx4_rss_get
				(priv, fields, rss_key, rss->queue_num,
				msg = "either invalid parameters or not enough"
					" resources for additional multi-queue"
				goto exit_action_not_supported;
			goto exit_action_not_supported;
	/* When fate is unknown, drop traffic. */
	/* Validation ends here. */
			mlx4_rss_put(flow->rss);
	/* Allocate proper handle based on collected data. */
	const struct mlx4_malloc_vec vec[] = {
			.align = alignof(struct rte_flow),
			.size = sizeof(*flow),
			.addr = (void **)&flow,
			.align = alignof(struct ibv_flow_attr),
			.size = temp.ibv_attr_size,
			.addr = (void **)&temp.ibv_attr,
	if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
			mlx4_rss_put(temp.rss);
		return rte_flow_error_set
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "flow rule handle allocation failure");
	/* Most fields will be updated by the second pass. */
	*flow = (struct rte_flow){
		.ibv_attr = temp.ibv_attr,
		.ibv_attr_size = sizeof(*flow->ibv_attr),
	*flow->ibv_attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(*flow->ibv_attr),
		.priority = attr->priority,
exit_item_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg ? msg : "item not supported");
exit_action_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  action, msg ? msg : "action not supported");
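
/*
 * Application-side sketch (public rte_flow API; port_id, addresses and
 * the queue index are placeholders): a rule this function accepts,
 * steering UDP traffic for one destination MAC/IPv4 address to Rx
 * queue 3:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(0x0a000001), // 10.0.0.1
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f =
 *		rte_flow_create(port_id, &attr, pattern, actions, &err);
 */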

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);

/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 */
static struct mlx4_drop *
mlx4_drop_get(struct priv *priv)
{
	struct mlx4_drop *drop = priv->drop;

		assert(drop->refcnt);
		assert(drop->priv == priv);
	drop = rte_malloc(__func__, sizeof(*drop), 0);
	*drop = (struct mlx4_drop){
	drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
	drop->qp = mlx4_glue->create_qp
		 &(struct ibv_qp_init_attr){
			.send_cq = drop->cq,
			.recv_cq = drop->cq,
			.qp_type = IBV_QPT_RAW_PACKET,
	claim_zero(mlx4_glue->destroy_qp(drop->qp));
	claim_zero(mlx4_glue->destroy_cq(drop->cq));

/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
	assert(drop->refcnt);
		drop->priv->drop = NULL;
	claim_zero(mlx4_glue->destroy_qp(drop->qp));
	claim_zero(mlx4_glue->destroy_cq(drop->cq));

/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether the associated Verbs flow must be created or removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct priv *priv,
		 struct rte_flow *flow,
		 int enable,
		 struct rte_flow_error *error)
{
	struct ibv_qp *qp = NULL;

		if (!flow->ibv_flow)
		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
			mlx4_drop_put(priv->drop);
			mlx4_rss_detach(flow->rss);
	assert(flow->ibv_attr);
	if (!flow->internal &&
	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
		if (flow->ibv_flow) {
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
				mlx4_drop_put(priv->drop);
				mlx4_rss_detach(flow->rss);
		msg = ("priority level "
		       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
		       " is reserved when not in isolated mode");
		struct mlx4_rss *rss = flow->rss;

		/* Stop at the first nonexistent target queue. */
		for (i = 0; i != rss->queues; ++i)
			if (rss->queue_id[i] >=
			    priv->dev->data->nb_rx_queues ||
			    !priv->dev->data->rx_queues[rss->queue_id[i]]) {
		if (flow->ibv_flow) {
			if (missing ^ !flow->drop)
			/* Verbs flow needs updating. */
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
				mlx4_drop_put(priv->drop);
				mlx4_rss_detach(rss);
			err = mlx4_rss_attach(rss);
				msg = "cannot create indirection table or hash"
					" QP to associate flow rule with";
		/* A missing target queue drops traffic implicitly. */
		flow->drop = missing;
			mlx4_drop_get(priv);
			msg = "resources for drop flow rule cannot be created";
		qp = priv->drop->qp;
	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
		mlx4_drop_put(priv->drop);
		mlx4_rss_detach(flow->rss);
	msg = "flow rule rejected by device";
	return rte_flow_error_set
		(error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);

/**
 * @see rte_flow_create()
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;

	err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
	err = mlx4_flow_toggle(priv, flow, priv->started, error);
		struct rte_flow *curr = LIST_FIRST(&priv->flows);

		/* New rules are inserted after internal ones. */
		if (!curr || !curr->internal) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
			while (LIST_NEXT(curr, next) &&
			       LIST_NEXT(curr, next)->internal)
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
	mlx4_rss_put(flow->rss);
	rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
	priv->isolated = !!enable;
	if (mlx4_flow_sync(priv, error)) {
		priv->isolated = !enable;

/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int err = mlx4_flow_toggle(priv, flow, 0, error);

	LIST_REMOVE(flow, next);
		mlx4_rss_put(flow->rss);

/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow = LIST_FIRST(&priv->flows);

		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->internal)
			mlx4_flow_destroy(dev, flow, error);

/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
{
	while (vlan < 4096) {
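		/*
		 * vlan_filter_conf.ids is a 4096-bit set packed in 64-bit
		 * words: word vlan / 64 holds bit vlan % 64, as tested
		 * below.
		 */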
		if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
		    (UINT64_C(1) << (vlan % 64)))

/**
 * Generate internal flow rules.
 *
 * Various flow rules are created depending on the mode the device is in:
 *
 * - Promiscuous:
 *   port MAC + broadcast + catch-all (VLAN filtering is ignored).
 * - All multicast:
 *   port MAC/VLAN + broadcast + catch-all multicast.
 * - Otherwise:
 *   port MAC/VLAN + broadcast MAC/VLAN.
 *
 * About MAC flow rules:
 *
 * - MAC flow rules are generated from @p dev->data->mac_addrs
 *   (@p priv->mac array).
 * - An additional flow rule for Ethernet broadcasts is also generated.
 * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
 *   is enabled and VLAN filters are configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = {
		.priority = MLX4_FLOW_PRIORITY_LAST,
	struct rte_flow_item_eth eth_spec;
	const struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	const struct rte_flow_item_eth eth_allmulti = {
		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
	struct rte_flow_item_vlan vlan_spec;
	const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0x0fff),
	struct rte_flow_item pattern[] = {
			.type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			/* Replaced with VLAN if filtering is enabled. */
			.type = RTE_FLOW_ITEM_TYPE_END,
			.type = RTE_FLOW_ITEM_TYPE_END,
	/*
	 * Round the number of queues down to their previous power of 2 to
	 * comply with RSS context limitations. Extra queues silently do not
	 * get RSS by default.
	 */
	uint32_t queues =
		rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
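	/* E.g. 6 Rx queues: rte_align32pow2(7) == 8, 8 >> 1 == 4. */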
	uint16_t queue[queues];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.key_len = MLX4_RSS_HASH_KEY_SIZE,
		.queue_num = queues,
		.key = mlx4_rss_hash_key_default,
	struct rte_flow_action actions[] = {
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
			.type = RTE_FLOW_ACTION_TYPE_END,
	struct ether_addr *rule_mac = &eth_spec.dst;
	rte_be16_t *rule_vlan =
		(priv->dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
		!priv->dev->data->promiscuous ?
	struct rte_flow *flow;
	/* Nothing to be done if there are no Rx queues. */
	/* Prepare default RSS configuration. */
	for (i = 0; i != queues; ++i)
	/*
	 * Set up VLAN item if filtering is enabled and at least one VLAN
	 * filter is configured.
	 */
		vlan = mlx4_flow_internal_next_vlan(priv, 0);
			pattern[2] = (struct rte_flow_item){
				.type = RTE_FLOW_ITEM_TYPE_VLAN,
			*rule_vlan = rte_cpu_to_be_16(vlan);
	for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
		const struct ether_addr *mac;

		/* Broadcasts are handled by an extra iteration. */
		if (i < RTE_DIM(priv->mac))
			mac = &priv->mac[i];
			mac = &eth_mask.dst;
		if (is_zero_ether_addr(mac))
		/* Check if MAC flow rule is already present. */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			const struct ibv_flow_spec_eth *eth =
				(const void *)((uintptr_t)flow->ibv_attr +
					       sizeof(*flow->ibv_attr));
			assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
			assert(flow->ibv_attr->num_of_specs == 1);
			assert(eth->type == IBV_FLOW_SPEC_ETH);
			    (eth->val.vlan_tag != *rule_vlan ||
			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
			if (!rule_vlan && eth->mask.vlan_tag)
			for (j = 0; j != sizeof(mac->addr_bytes); ++j)
				if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
				    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
				    eth->val.src_mac[j] != UINT8_C(0x00) ||
				    eth->mask.src_mac[j] != UINT8_C(0x00))
			if (j != sizeof(mac->addr_bytes))
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			memcpy(rule_mac, mac, sizeof(*mac));
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
		vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
	/* Take care of promiscuous and all multicast flow rules. */
	if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			if (priv->dev->data->promiscuous) {
			assert(priv->dev->data->all_multicast);
		if (flow && flow->internal) {
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			if (priv->dev->data->promiscuous) {
				pattern[1].spec = NULL;
				pattern[1].mask = NULL;
				assert(priv->dev->data->all_multicast);
				pattern[1].spec = &eth_allmulti;
				pattern[1].mask = &eth_allmulti;
			pattern[2] = pattern[3];
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
			assert(flow->promisc || flow->allmulti);
	/* Clear selection and clean up stale internal flow rules. */
	flow = LIST_FIRST(&priv->flows);
	while (flow && flow->internal) {
		struct rte_flow *next = LIST_NEXT(flow, next);

			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));

/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow *flow;

	/* Internal flow rules are guaranteed to come first in the list. */
	if (priv->isolated) {
		/*
		 * Get rid of them in isolated mode, stop at the first
		 * non-internal rule found.
		 */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_FIRST(&priv->flows))
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
	/* Refresh internal rules. */
	ret = mlx4_flow_internal(priv, error);
	/* Toggle the remaining flow rules. */
	LIST_FOREACH(flow, &priv->flows, next) {
		ret = mlx4_flow_toggle(priv, flow, priv->started, error);
	assert(!priv->drop);

/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct priv *priv)
{
	struct rte_flow *flow;

	while ((flow = LIST_FIRST(&priv->flows)))
		mlx4_flow_destroy(priv->dev, flow, NULL);
	assert(LIST_EMPTY(&priv->rss));

static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};
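
/*
 * Applications do not call the handlers above directly: rte_flow_create()
 * and friends reach them through the RTE_ETH_FILTER_GENERIC filter type
 * handled by mlx4_filter_ctrl() below.
 */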

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
		*(const void **)arg = &mlx4_flow_ops;
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
	rte_errno = ENOTSUP;