/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
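/*
 * For instance, NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to a compound literal equivalent to:
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * Since RTE_FLOW_ITEM_TYPE_END is 0, iterating until a zero entry walks the
 * allowed successors of a given pattern item.
 */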
/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
	/** Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/** Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/** Size in bytes for @p mask_support and @p mask_default. */
	const unsigned int mask_sz;
	/** Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};
/** Shared resources for drop flow rules. */
struct mlx4_drop {
	struct ibv_qp *qp; /**< QP target. */
	struct ibv_cq *cq; /**< CQ associated with above QP. */
	struct mlx4_priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count. */
};
/**
 * Convert supported RSS hash field types between DPDK and Verbs formats.
 *
 * This function returns the supported (default) set when @p types has
 * special value 0.
 *
 * @param priv
 *   Pointer to private structure.
 * @param types
 *   Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct
 *   rte_eth_rss_conf) or Verbs format.
 * @param verbs_to_dpdk
 *   A zero value converts @p types from DPDK to Verbs, a nonzero value
 *   performs the reverse operation.
 *
 * @return
 *   Converted RSS hash fields on success, (uint64_t)-1 otherwise and
 *   rte_errno is set.
 */
static uint64_t
mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
{
	enum {
		INNER,
		IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3,
		TCP, UDP,
		IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1,
	};
	enum {
		VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
		VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
		VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
		VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
	};
	static const uint64_t dpdk[] = {
		[IPV4] = ETH_RSS_IPV4,
		[IPV4_1] = ETH_RSS_FRAG_IPV4,
		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
		[IPV6] = ETH_RSS_IPV6,
		[IPV6_1] = ETH_RSS_FRAG_IPV6,
		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
		[IPV6_3] = ETH_RSS_IPV6_EX,
		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
	};
	static const uint64_t verbs[RTE_DIM(dpdk)] = {
		[INNER] = IBV_RX_HASH_INNER,
		[IPV4] = VERBS_IPV4,
		[IPV4_1] = VERBS_IPV4,
		[IPV4_2] = VERBS_IPV4,
		[IPV6] = VERBS_IPV6,
		[IPV6_1] = VERBS_IPV6,
		[IPV6_2] = VERBS_IPV6,
		[IPV6_3] = VERBS_IPV6,
		[TCP] = VERBS_TCP,
		[UDP] = VERBS_UDP,
		[IPV4_TCP] = VERBS_IPV4 | VERBS_TCP,
		[IPV4_UDP] = VERBS_IPV4 | VERBS_UDP,
		[IPV6_TCP] = VERBS_IPV6 | VERBS_TCP,
		[IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP,
		[IPV6_UDP] = VERBS_IPV6 | VERBS_UDP,
		[IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP,
	};
	const uint64_t *in = verbs_to_dpdk ? verbs : dpdk;
	const uint64_t *out = verbs_to_dpdk ? dpdk : verbs;
	uint64_t seen = 0;
	uint64_t conv = 0;
	unsigned int i;

	if (!types) {
		if (!verbs_to_dpdk)
			return priv->hw_rss_sup;
		types = priv->hw_rss_sup;
	}
	for (i = 0; i != RTE_DIM(dpdk); ++i)
		if (in[i] && (types & in[i]) == in[i]) {
			seen |= types & in[i];
			conv |= out[i];
		}
	if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) &&
	    !(types & ~seen))
		return conv;
	rte_errno = ENOTSUP;
	return (uint64_t)-1;
}
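/*
 * Conversion sketch (illustrative, not part of the driver): assuming the
 * device reports support for the corresponding Verbs fields in
 * priv->hw_rss_sup,
 *
 *   mlx4_conv_rss_types(priv, ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP, 0)
 *
 * yields VERBS_IPV4 | VERBS_UDP, while passing 0 as @p types returns the
 * default supported set (priv->hw_rss_sup as-is).
 */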
/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;
	unsigned int i;

	if (mask) {
		uint32_t sum_dst = 0;
		uint32_t sum_src = 0;

		for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
			sum_dst += mask->dst.addr_bytes[i];
			sum_src += mask->src.addr_bytes[i];
		}
		if (sum_src) {
			msg = "mlx4 does not support source MAC matching";
			goto error;
		} else if (!sum_dst) {
			flow->promisc = 1;
		} else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
			if (!(spec->dst.addr_bytes[0] & 1)) {
				msg = "mlx4 does not support the explicit"
					" exclusion of all multicast traffic";
				goto error;
			}
			flow->allmulti = 1;
		} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
			msg = "mlx4 does not support matching partial"
				" Ethernet fields";
			goto error;
		}
	}
	if (!flow->ibv_attr)
		return 0;
	if (flow->promisc) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (flow->allmulti) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
		return 0;
	}
	++flow->ibv_attr->num_of_specs;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*eth),
	};
	if (!mask) {
		eth->val.dst_mac[0] = 0xff;
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		flow->promisc = 1;
		return 0;
	}
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
	}

	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
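/*
 * Illustrative spec/mask combinations accepted above (not exhaustive):
 *
 *   dst spec              dst mask              outcome
 *   ff:ff:ff:ff:ff:ff     ff:ff:ff:ff:ff:ff     exact match on one MAC
 *   01:00:00:00:00:00     01:00:00:00:00:00     all multicast (allmulti)
 *   (none)                (none)                promiscuous-like rule
 *
 * Anything in between, e.g. a partial MAC mask, is rejected with ENOTSUP.
 */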
/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;

	if (!mask || !mask->tci) {
		msg = "mlx4 cannot match all VLAN traffic while excluding"
			" non-VLAN traffic, TCI VID must be specified";
		goto error;
	}
	if (mask->tci != RTE_BE16(0x0fff)) {
		msg = "mlx4 does not support partial TCI VID matching";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
		       sizeof(*eth));
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	if (flow->ibv_attr->type == IBV_FLOW_ATTR_ALL_DEFAULT)
		flow->ibv_attr->type = IBV_FLOW_ATTR_NORMAL;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
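/*
 * Example (illustrative): matching VLAN 42 on top of the preceding Ethernet
 * item requires a fully-masked TCI VID:
 *
 *   struct rte_flow_item_vlan spec = { .tci = RTE_BE16(42) };
 *   struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 *
 * Omitting item->spec entirely is rejected above since such a rule would
 * also match non-VLAN traffic.
 */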
/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_ipv4 *ipv4;
	const char *msg;
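	/*
	 * Note: for any unsigned value x, (x + 1) > 1 is false only when x
	 * is 0 or all ones (wrap-around), i.e. the check below accepts only
	 * empty or full masks and rejects partial ones.
	 */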
	if (mask &&
	    ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
	     (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
		msg = "mlx4 does not support matching partial IPv4 fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = sizeof(*ipv4),
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 * - Due to HW/FW limitation, flow rule priority is not taken into account
 *   when matching UDP destination ports, doing so is therefore only
 *   supported at the highest priority level (0).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *udp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial UDP fields";
		goto error;
	}
	if (mask && mask->hdr.dst_port && flow->priority) {
		msg = "combining UDP destination port matching with a nonzero"
			" priority level is not supported";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(*udp),
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
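/*
 * Example (illustrative): a rule matching hdr.dst_port is only accepted
 * when attr->priority is 0; the same match at priority 1 or higher fails
 * with ENOTSUP due to the HW/FW limitation documented above.
 */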
/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *tcp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial TCP fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(*tcp),
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const uint8_t *mask;
	unsigned int i;

	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
	/* No spec, no mask, no problem. */
	if (!item->spec)
		return 0;
	mask = item->mask ?
		(const uint8_t *)item->mask :
		(const uint8_t *)proc->mask_default;

	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (!mask[i])
			continue;
		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
		    ((const uint8_t *)proc->mask_support)[i])
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported field found in \"mask\"");
		if (item->last &&
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i]))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item,
				 "range between \"spec\" and \"last\""
				 " is larger than \"mask\"");
	}
	return 0;
}
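/*
 * Example (illustrative): for an IPv4 item with spec->hdr.dst_addr =
 * 10.0.0.0, last->hdr.dst_addr = 10.0.0.255 and mask->hdr.dst_addr =
 * 255.255.255.0, the range check above passes since (spec & mask) ==
 * (last & mask); the same spec/last pair with a 255.255.255.255 mask is
 * rejected because the range exceeds what the mask can express.
 */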
/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_eth){
			/* Only destination MAC can be matched. */
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.mask_default = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.merge = mlx4_flow_merge_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_vlan){
			/* Only TCI VID matching is supported. */
			.tci = RTE_BE16(0x0fff),
		},
		.mask_default = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.merge = mlx4_flow_merge_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = mlx4_flow_merge_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = mlx4_flow_merge_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = mlx4_flow_merge_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};
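/*
 * The next_item chains above mean a pattern is accepted only if it follows
 * ETH [-> VLAN] -> IPV4 -> UDP|TCP; any prefix of that chain (e.g. ETH
 * alone or ETH -> IPV4) is also valid, VOID and internal items aside.
 */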
/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct mlx4_priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct rte_flow **addr)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc;
	struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
	struct rte_flow *flow = &temp;
	const char *msg = NULL;
	int overlap;

	if (attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			 NULL, "groups are not supported");
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			 NULL, "maximum priority level is "
			 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
	if (attr->egress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			 NULL, "egress is not supported");
	if (attr->transfer)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			 NULL, "transfer is not supported");
	if (!attr->ingress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			 NULL, "only ingress is supported");
fill:
	overlap = 0;
	proc = mlx4_flow_proc_item_list;
	flow->priority = attr->priority;
	/* Go over pattern. */
	for (item = pattern; item->type; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;
		unsigned int i;
		int err;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
			flow->internal = 1;
			continue;
		}
		if (flow->promisc || flow->allmulti) {
			msg = "mlx4 does not support additional matching"
				" criteria combined with indiscriminate"
				" matching on Ethernet headers";
			goto exit_item_not_supported;
		}
		for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
				break;
			}
		}
		if (!next)
			goto exit_item_not_supported;
		proc = next;
		/*
		 * Perform basic sanity checks only once, while handle is
		 * not allocated.
		 */
		if (flow == &temp) {
			err = mlx4_flow_item_check(item, proc, error);
			if (err)
				return err;
		}
		if (proc->merge) {
			err = proc->merge(flow, item, proc, error);
			if (err)
				return err;
		}
		flow->ibv_attr_size += proc->dst_sz;
	}
	/* Go over actions list. */
	for (action = actions; action->type; ++action) {
		/* This one may appear anywhere multiple times. */
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		/* Fate-deciding actions may appear exactly once. */
		if (overlap) {
			msg = "cannot combine several fate-deciding actions,"
				" choose between DROP, QUEUE or RSS";
			goto exit_action_not_supported;
		}
		overlap = 1;
		switch (action->type) {
			const struct rte_flow_action_queue *queue;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;
			uint32_t rss_key_len;
			uint64_t fields;
			unsigned int i;

		case RTE_FLOW_ACTION_TYPE_DROP:
			flow->drop = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			if (flow->rss)
				break;
			queue = action->conf;
			if (queue->index >= priv->dev->data->nb_rx_queues) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, 0, mlx4_rss_hash_key_default, 1,
				 &queue->index);
			if (!flow->rss) {
				msg = "not enough resources for additional"
					" single-queue RSS context";
				goto exit_action_not_supported;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			if (flow->rss)
				break;
			rss = action->conf;
			/* Default RSS configuration if none is provided. */
			if (rss->key_len) {
				rss_key = rss->key;
				rss_key_len = rss->key_len;
			} else {
				rss_key = mlx4_rss_hash_key_default;
				rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
			}
			/* Sanity checks. */
			for (i = 0; i < rss->queue_num; ++i)
				if (rss->queue[i] >=
				    priv->dev->data->nb_rx_queues)
					break;
			if (i != rss->queue_num) {
				msg = "queue index target beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			if (!rte_is_power_of_2(rss->queue_num)) {
				msg = "for RSS, mlx4 requires the number of"
					" queues to be a power of two";
				goto exit_action_not_supported;
			}
			if (rss_key_len != sizeof(flow->rss->key)) {
				msg = "mlx4 supports exactly one RSS hash key"
					" length: "
					MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
				goto exit_action_not_supported;
			}
			for (i = 1; i < rss->queue_num; ++i)
				if (rss->queue[i] - rss->queue[i - 1] != 1)
					break;
			if (i != rss->queue_num) {
				msg = "mlx4 requires RSS contexts to use"
					" consecutive queue indices only";
				goto exit_action_not_supported;
			}
			if (rss->queue[0] % rss->queue_num) {
				msg = "mlx4 requires the first queue of a RSS"
					" context to be aligned on a multiple"
					" of the context size";
				goto exit_action_not_supported;
			}
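			/*
			 * Taken together, the three checks above mean RSS
			 * queue lists must look like { N*k, ..., N*k+N-1 }
			 * with N a power of two: e.g. {0,1,2,3} and {4,5,6,7}
			 * are valid for N = 4, while {3,4,5,6} (misaligned)
			 * and {0,1,2} (not a power of two) are rejected.
			 */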
			if (rss->func &&
			    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
				msg = "the only supported RSS hash function"
					" is Toeplitz";
				goto exit_action_not_supported;
			}
			if (rss->level) {
				msg = "a nonzero RSS encapsulation level is"
					" not supported";
				goto exit_action_not_supported;
			}
			rte_errno = 0;
			fields = mlx4_conv_rss_types(priv, rss->types, 0);
			if (fields == (uint64_t)-1 && rte_errno) {
				msg = "unsupported RSS hash type requested";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, fields, rss_key, rss->queue_num,
				 rss->queue);
			if (!flow->rss) {
				msg = "either invalid parameters or not enough"
					" resources for additional multi-queue"
					" RSS context";
				goto exit_action_not_supported;
			}
			break;
		default:
			goto exit_action_not_supported;
		}
	}
	/* When fate is unknown, drop traffic. */
	if (!overlap)
		flow->drop = 1;
	/* Validation ends here. */
	if (!addr) {
		if (flow->rss)
			mlx4_rss_put(flow->rss);
		return 0;
	}
	if (flow == &temp) {
		/* Allocate proper handle based on collected data. */
		const struct mlx4_malloc_vec vec[] = {
			{
				.align = alignof(struct rte_flow),
				.size = sizeof(*flow),
				.addr = (void **)&flow,
			},
			{
				.align = alignof(struct ibv_flow_attr),
				.size = temp.ibv_attr_size,
				.addr = (void **)&temp.ibv_attr,
			},
		};

		if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
			if (temp.rss)
				mlx4_rss_put(temp.rss);
			return rte_flow_error_set
				(error, -rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "flow rule handle allocation failure");
		}
		/* Most fields will be updated by second pass. */
		*flow = (struct rte_flow){
			.ibv_attr = temp.ibv_attr,
			.ibv_attr_size = sizeof(*flow->ibv_attr),
			.rss = temp.rss,
		};
		*flow->ibv_attr = (struct ibv_flow_attr){
			.type = IBV_FLOW_ATTR_NORMAL,
			.size = sizeof(*flow->ibv_attr),
			.priority = attr->priority,
			.port = priv->port,
		};
		goto fill;
	}
	*addr = flow;
	return 0;
exit_item_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg ? msg : "item not supported");
exit_action_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  action, msg ? msg : "action not supported");
}
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct mlx4_priv *priv = dev->data->dev_private;

	return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct mlx4_priv *priv)
{
	struct mlx4_drop *drop = priv->drop;

	if (drop) {
		assert(drop->refcnt);
		assert(drop->priv == priv);
		++drop->refcnt;
		return drop;
	}
	drop = rte_malloc(__func__, sizeof(*drop), 0);
	if (!drop)
		goto error;
	*drop = (struct mlx4_drop){
		.priv = priv,
		.refcnt = 1,
	};
	drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!drop->cq)
		goto error;
	drop->qp = mlx4_glue->create_qp
		(priv->pd,
		 &(struct ibv_qp_init_attr){
			.send_cq = drop->cq,
			.recv_cq = drop->cq,
			.qp_type = IBV_QPT_RAW_PACKET,
		 });
	if (!drop->qp)
		goto error;
	priv->drop = drop;
	return drop;
error:
	if (drop) {
		if (drop->qp)
			claim_zero(mlx4_glue->destroy_qp(drop->qp));
		if (drop->cq)
			claim_zero(mlx4_glue->destroy_cq(drop->cq));
		rte_free(drop);
	}
	rte_errno = ENOMEM;
	return NULL;
}
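/*
 * Usage sketch (illustrative): drop resources are shared by all drop flow
 * rules and reference-counted:
 *
 *   struct mlx4_drop *drop = mlx4_drop_get(priv); // created on first use
 *   ...
 *   mlx4_drop_put(drop); // QP/CQ destroyed once the last user is gone
 */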
/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
	assert(drop->refcnt);
	if (--drop->refcnt)
		return;
	drop->priv->drop = NULL;
	claim_zero(mlx4_glue->destroy_qp(drop->qp));
	claim_zero(mlx4_glue->destroy_cq(drop->cq));
	rte_free(drop);
}
/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct mlx4_priv *priv,
		 struct rte_flow *flow,
		 int enable,
		 struct rte_flow_error *error)
{
	struct ibv_qp *qp = NULL;
	const char *msg;
	int err;

	if (!enable) {
		if (!flow->ibv_flow)
			return 0;
		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		if (flow->drop)
			mlx4_drop_put(priv->drop);
		else if (flow->rss)
			mlx4_rss_detach(flow->rss);
		return 0;
	}
	assert(flow->ibv_attr);
	if (!flow->internal &&
	    !priv->isolated &&
	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
		if (flow->ibv_flow) {
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else if (flow->rss)
				mlx4_rss_detach(flow->rss);
		}
		err = EACCES;
		msg = ("priority level "
		       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
		       " is reserved when not in isolated mode");
		goto error;
	}
	if (flow->rss) {
		struct mlx4_rss *rss = flow->rss;
		int missing = 0;
		unsigned int i;

		/* Stop at the first nonexistent target queue. */
		for (i = 0; i != rss->queues; ++i)
			if (rss->queue_id[i] >=
			    priv->dev->data->nb_rx_queues ||
			    !priv->dev->data->rx_queues[rss->queue_id[i]]) {
				missing = 1;
				break;
			}
		if (flow->ibv_flow) {
			if (missing ^ !flow->drop)
				return 0;
			/* Verbs flow needs updating. */
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else
				mlx4_rss_detach(rss);
		}
		if (!missing) {
			err = mlx4_rss_attach(rss);
			if (err) {
				err = -err;
				msg = "cannot create indirection table or hash"
					" QP to associate flow rule with";
				goto error;
			}
			qp = rss->qp;
		}
		/* A missing target queue drops traffic implicitly. */
		flow->drop = missing;
	}
	if (flow->drop) {
		mlx4_drop_get(priv);
		if (!priv->drop) {
			err = rte_errno;
			msg = "resources for drop flow rule cannot be created";
			goto error;
		}
		qp = priv->drop->qp;
	}
	assert(qp);
	if (flow->ibv_flow)
		return 0;
	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
	if (flow->ibv_flow)
		return 0;
	if (flow->drop)
		mlx4_drop_put(priv->drop);
	else if (flow->rss)
		mlx4_rss_detach(flow->rss);
	err = errno;
	msg = "flow rule rejected by device";
error:
	return rte_flow_error_set
		(error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int err;

	err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
	if (err)
		return NULL;
	err = mlx4_flow_toggle(priv, flow, priv->started, error);
	if (!err) {
		struct rte_flow *curr = LIST_FIRST(&priv->flows);

		/* New rules are inserted after internal ones. */
		if (!curr || !curr->internal) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			while (LIST_NEXT(curr, next) &&
			       LIST_NEXT(curr, next)->internal)
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
		return flow;
	}
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   error->message);
	rte_free(flow);
	return NULL;
}
/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct mlx4_priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
		return 0;
	priv->isolated = !!enable;
	if (mlx4_flow_sync(priv, error)) {
		priv->isolated = !enable;
		return -rte_errno;
	}
	return 0;
}
/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	int err = mlx4_flow_toggle(priv, flow, 0, error);

	if (err)
		return err;
	LIST_REMOVE(flow, next);
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_free(flow);
	return 0;
}
/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = LIST_FIRST(&priv->flows);

	while (flow) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->internal)
			mlx4_flow_destroy(dev, flow, error);
		flow = next;
	}
	return 0;
}
/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
{
	while (vlan < 4096) {
		if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
		    (UINT64_C(1) << (vlan % 64)))
			return vlan;
		++vlan;
	}
	return vlan;
}
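/*
 * Example (illustrative): with VLAN filters configured on IDs 5 and 100,
 * successive calls yield:
 *
 *   mlx4_flow_internal_next_vlan(priv, 0);   // 5
 *   mlx4_flow_internal_next_vlan(priv, 6);   // 100
 *   mlx4_flow_internal_next_vlan(priv, 101); // 4096 (none left)
 */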
/**
 * Generate internal flow rules.
 *
 * Various flow rules are created depending on the mode the device is in:
 *
 * 1. Promiscuous:
 *       port MAC + broadcast + catch-all (VLAN filtering is ignored).
 * 2. All multicast:
 *       port MAC/VLAN + broadcast + catch-all multicast.
 * 3. Otherwise:
 *       port MAC/VLAN + broadcast MAC/VLAN.
 *
 * About MAC flow rules:
 *
 * - MAC flow rules are generated from @p dev->data->mac_addrs
 *   (@p priv->mac array).
 * - An additional flow rule for Ethernet broadcasts is also generated.
 * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
 *   is enabled and VLAN filters are configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = {
		.priority = MLX4_FLOW_PRIORITY_LAST,
		.ingress = 1,
	};
	struct rte_flow_item_eth eth_spec;
	const struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const struct rte_flow_item_eth eth_allmulti = {
		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_vlan vlan_spec;
	const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0x0fff),
	};
	struct rte_flow_item pattern[] = {
		{
			.type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &eth_spec,
			.mask = &eth_mask,
		},
		{
			/* Replaced with VLAN if filtering is enabled. */
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	/*
	 * Round number of queues down to their previous power of 2 to
	 * comply with RSS context limitations. Extra queues silently do not
	 * get RSS by default.
	 */
	uint32_t queues =
		rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
	uint16_t queue[queues];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = 0,
		.key_len = MLX4_RSS_HASH_KEY_SIZE,
		.queue_num = queues,
		.key = mlx4_rss_hash_key_default,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct ether_addr *rule_mac = &eth_spec.dst;
	rte_be16_t *rule_vlan =
		(priv->dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
		!priv->dev->data->promiscuous ?
		&vlan_spec.tci :
		NULL;
	uint16_t vlan = 0;
	struct rte_flow *flow;
	unsigned int i;
	int err = 0;
	/* Nothing to be done if there are no Rx queues. */
	if (!queues)
		return 0;
	/* Prepare default RSS configuration. */
	for (i = 0; i != queues; ++i)
		queue[i] = i;
	/*
	 * Set up VLAN item if filtering is enabled and at least one VLAN
	 * filter is configured.
	 */
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, 0);
		if (vlan < 4096) {
			pattern[2] = (struct rte_flow_item){
				.type = RTE_FLOW_ITEM_TYPE_VLAN,
				.spec = &vlan_spec,
				.mask = &vlan_mask,
			};
next_vlan:
			*rule_vlan = rte_cpu_to_be_16(vlan);
		}
	}
	for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
		const struct ether_addr *mac;

		/* Broadcasts are handled by an extra iteration. */
		if (i < RTE_DIM(priv->mac))
			mac = &priv->mac[i];
		else
			mac = &eth_mask.dst;
		if (is_zero_ether_addr(mac))
			continue;
		/* Check if MAC flow rule is already present. */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			const struct ibv_flow_spec_eth *eth =
				(const void *)((uintptr_t)flow->ibv_attr +
					       sizeof(*flow->ibv_attr));
			unsigned int j;

			if (!flow->mac)
				continue;
			assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
			assert(flow->ibv_attr->num_of_specs == 1);
			assert(eth->type == IBV_FLOW_SPEC_ETH);
			assert(flow->rss);
			if (rule_vlan &&
			    (eth->val.vlan_tag != *rule_vlan ||
			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
				continue;
			if (!rule_vlan && eth->mask.vlan_tag)
				continue;
			for (j = 0; j != sizeof(mac->addr_bytes); ++j)
				if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
				    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
				    eth->val.src_mac[j] != UINT8_C(0x00) ||
				    eth->mask.src_mac[j] != UINT8_C(0x00))
					break;
			if (j != sizeof(mac->addr_bytes))
				continue;
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				continue;
			break;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			memcpy(rule_mac, mac, sizeof(*mac));
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		flow->select = 1;
		flow->mac = 1;
	}
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
		if (vlan < 4096)
			goto next_vlan;
	}
	/* Take care of promiscuous and all multicast flow rules. */
	if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			if (priv->dev->data->promiscuous) {
				if (flow->promisc)
					break;
			} else {
				assert(priv->dev->data->all_multicast);
				if (flow->allmulti)
					break;
			}
		}
		if (flow && flow->internal) {
			assert(flow->rss);
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				flow = NULL;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			if (priv->dev->data->promiscuous) {
				pattern[1].spec = NULL;
				pattern[1].mask = NULL;
			} else {
				assert(priv->dev->data->all_multicast);
				pattern[1].spec = &eth_allmulti;
				pattern[1].mask = &eth_allmulti;
			}
			pattern[2] = pattern[3];
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		assert(flow->promisc || flow->allmulti);
		flow->select = 1;
	}
error:
	/* Clear selection and clean up stale internal flow rules. */
	flow = LIST_FIRST(&priv->flows);
	while (flow && flow->internal) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->select)
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
		else
			flow->select = 0;
		flow = next;
	}
	return err;
}
/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int ret;

	/* Internal flow rules are guaranteed to come first in the list. */
	if (priv->isolated) {
		/*
		 * Get rid of them in isolated mode, stop at the first
		 * non-internal rule found.
		 */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_FIRST(&priv->flows))
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
	} else {
		/* Refresh internal rules. */
		ret = mlx4_flow_internal(priv, error);
		if (ret)
			return ret;
	}
	/* Toggle the remaining flow rules. */
	LIST_FOREACH(flow, &priv->flows, next) {
		ret = mlx4_flow_toggle(priv, flow, priv->started, error);
		if (ret)
			return ret;
	}
	if (!priv->started)
		assert(!priv->drop);
	return 0;
}
/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct mlx4_priv *priv)
{
	struct rte_flow *flow;

	while ((flow = LIST_FIRST(&priv->flows)))
		mlx4_flow_destroy(priv->dev, flow, NULL);
	assert(LIST_EMPTY(&priv->rss));
}
static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}
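/*
 * Note: applications do not call mlx4_flow_ops directly; the rte_flow API
 * entry points (rte_flow_validate(), rte_flow_create(), etc.) retrieve it
 * through this callback using filter type RTE_ETH_FILTER_GENERIC and
 * operation RTE_ETH_FILTER_GET.
 */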