/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }

/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
        /** Bit-mask for fields supported by this PMD. */
        const void *mask_support;
        /** Bit-mask to use when @p item->mask is not provided. */
        const void *mask_default;
        /** Size in bytes for @p mask_support and @p mask_default. */
        const unsigned int mask_sz;
        /** Merge a pattern item into a flow rule handle. */
        int (*merge)(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible subsequent items. */
        const enum rte_flow_item_type *const next_item;
};

/** Shared resources for drop flow rules. */
struct mlx4_drop {
        struct ibv_qp *qp; /**< QP target. */
        struct ibv_cq *cq; /**< CQ associated with above QP. */
        struct priv *priv; /**< Back pointer to private data. */
        uint32_t refcnt; /**< Reference count. */
};

/**
 * Convert DPDK RSS hash types to their Verbs equivalent.
 *
 * This function returns the supported (default) set when @p types has the
 * special value (uint64_t)-1.
 *
 * @param priv
 *   Pointer to private structure.
 * @param types
 *   Hash types in DPDK format (see struct rte_eth_rss_conf).
 *
 * @return
 *   A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
 *   otherwise and rte_errno is set.
 */
static uint64_t
mlx4_conv_rss_types(struct priv *priv, uint64_t types)
{
        enum { IPV4, IPV6, TCP, UDP, };
        const uint64_t in[] = {
                [IPV4] = (ETH_RSS_IPV4 |
                          ETH_RSS_NONFRAG_IPV4_TCP |
                          ETH_RSS_NONFRAG_IPV4_UDP |
                          ETH_RSS_NONFRAG_IPV4_OTHER),
                [IPV6] = (ETH_RSS_IPV6 |
                          ETH_RSS_NONFRAG_IPV6_TCP |
                          ETH_RSS_NONFRAG_IPV6_UDP |
                          ETH_RSS_NONFRAG_IPV6_OTHER |
                          ETH_RSS_IPV6_TCP_EX |
                          ETH_RSS_IPV6_UDP_EX),
                [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
                         ETH_RSS_NONFRAG_IPV6_TCP |
                         ETH_RSS_IPV6_TCP_EX),
                [UDP] = (ETH_RSS_NONFRAG_IPV4_UDP |
                         ETH_RSS_NONFRAG_IPV6_UDP |
                         ETH_RSS_IPV6_UDP_EX),
        };
        const uint64_t out[RTE_DIM(in)] = {
                [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
                [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
                [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
                [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
        };
        uint64_t seen = 0;
        uint64_t conv = 0;
        unsigned int i;

        if (types == (uint64_t)-1)
                return priv->hw_rss_sup;
        for (i = 0; i != RTE_DIM(in); ++i)
                if (types & in[i]) {
                        seen |= types & in[i];
                        conv |= out[i];
                }
        if ((conv & priv->hw_rss_sup) == conv && !(types & ~seen))
                return conv;
        rte_errno = ENOTSUP;
        return (uint64_t)-1;
}
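
/*
 * Illustrative sketch (added commentary, not compiled): requesting TCP/IPv4
 * RSS through the helper above yields the combined IPv4 address and TCP
 * port Verbs hash fields, provided the device reports them all in
 * priv->hw_rss_sup:
 *
 *   uint64_t fields = mlx4_conv_rss_types(priv, ETH_RSS_NONFRAG_IPV4_TCP);
 *   // fields == IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *   //           IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP
 */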

/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_eth *eth;
        const char *msg;
        unsigned int i;

        if (!mask) {
                flow->promisc = 1;
        } else {
                uint32_t sum_dst = 0;
                uint32_t sum_src = 0;

                for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
                        sum_dst += mask->dst.addr_bytes[i];
                        sum_src += mask->src.addr_bytes[i];
                }
                if (sum_src) {
                        msg = "mlx4 does not support source MAC matching";
                        goto error;
                } else if (!sum_dst) {
                        flow->promisc = 1;
                } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
                        if (!(spec->dst.addr_bytes[0] & 1)) {
                                msg = "mlx4 does not support the explicit"
                                        " exclusion of all multicast traffic";
                                goto error;
                        }
                        flow->allmulti = 1;
                } else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
                        msg = "mlx4 does not support matching partial"
                                " Ethernet fields";
                        goto error;
                }
        }
        if (!flow->ibv_attr)
                return 0;
        if (flow->promisc) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
                return 0;
        }
        if (flow->allmulti) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
                return 0;
        }
        ++flow->ibv_attr->num_of_specs;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *eth = (struct ibv_flow_spec_eth) {
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*eth),
        };
        memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
        /* Remove unwanted bits from values. */
        for (i = 0; i < ETHER_ADDR_LEN; ++i) {
                eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
        }
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
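
/*
 * Illustrative sketch (added commentary, not compiled): the only partial
 * destination mask accepted above is the "all multicast" pattern, with both
 * spec and mask reduced to the multicast group bit:
 *
 *   static const struct rte_flow_item_eth allmulti = {
 *           .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &allmulti,
 *           .mask = &allmulti,
 *   };
 */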

/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_eth *eth;
        const char *msg;

        if (!mask || !mask->tci) {
                msg = "mlx4 cannot match all VLAN traffic while excluding"
                        " non-VLAN traffic, TCI VID must be specified";
                goto error;
        }
        if (mask->tci != RTE_BE16(0x0fff)) {
                msg = "mlx4 does not support partial TCI VID matching";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
                       sizeof(*eth));
        eth->val.vlan_tag = spec->tci;
        eth->mask.vlan_tag = mask->tci;
        eth->val.vlan_tag &= eth->mask.vlan_tag;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
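
/*
 * Illustrative sketch (added commentary, not compiled): the only VLAN match
 * accepted above is an exact TCI VID under the full 12-bit mask, e.g. for
 * VLAN 42:
 *
 *   struct rte_flow_item_vlan spec = { .tci = RTE_BE16(42) };
 *   struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 */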

/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_ipv4 *ipv4;
        const char *msg;

        if (mask &&
            ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
             (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
                msg = "mlx4 does not support matching partial IPv4 fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *ipv4 = (struct ibv_flow_spec_ipv4) {
                .type = IBV_FLOW_SPEC_IPV4,
                .size = sizeof(*ipv4),
        };
        if (!spec)
                return 0;
        ipv4->val = (struct ibv_flow_ipv4_filter) {
                .src_ip = spec->hdr.src_addr,
                .dst_ip = spec->hdr.dst_addr,
        };
        ipv4->mask = (struct ibv_flow_ipv4_filter) {
                .src_ip = mask->hdr.src_addr,
                .dst_ip = mask->hdr.dst_addr,
        };
        /* Remove unwanted bits from values. */
        ipv4->val.src_ip &= ipv4->mask.src_ip;
        ipv4->val.dst_ip &= ipv4->mask.dst_ip;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
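
/*
 * Note on the partial-mask test above (added commentary): for a 32-bit mask
 * m, (uint32_t)(m + 1) > 1 holds unless m is 0x00000000 (m + 1 == 1) or
 * 0xffffffff (m + 1 wraps to 0), i.e. the test flags every partial mask
 * while accepting both "ignore this field" and "match it exactly".
 */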

/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 * - Due to HW/FW limitation, flow rule priority is not taken into account
 *   when matching UDP destination ports, doing so is therefore only
 *   supported at the highest priority level (0).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_tcp_udp *udp;
        const char *msg;

        if (mask &&
            ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
             (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
                msg = "mlx4 does not support matching partial UDP fields";
                goto error;
        }
        if (mask && mask->hdr.dst_port && flow->priority) {
                msg = "combining UDP destination port matching with a nonzero"
                        " priority level is not supported";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *udp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_UDP,
                .size = sizeof(*udp),
        };
        if (!spec)
                return 0;
        udp->val.dst_port = spec->hdr.dst_port;
        udp->val.src_port = spec->hdr.src_port;
        udp->mask.dst_port = mask->hdr.dst_port;
        udp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        udp->val.src_port &= udp->mask.src_port;
        udp->val.dst_port &= udp->mask.dst_port;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
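
/*
 * Illustrative sketch (added commentary, not compiled): because of the
 * HW/FW limitation checked above, a rule matching a UDP destination port,
 * e.g. VXLAN's 4789, is only accepted with attr->priority == 0; the same
 * pattern at any other priority level is rejected.
 */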

/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_tcp_udp *tcp;
        const char *msg;

        if (mask &&
            ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
             (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
                msg = "mlx4 does not support matching partial TCP fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *tcp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_TCP,
                .size = sizeof(*tcp),
        };
        if (!spec)
                return 0;
        tcp->val.dst_port = spec->hdr.dst_port;
        tcp->val.src_port = spec->hdr.src_port;
        tcp->mask.dst_port = mask->hdr.dst_port;
        tcp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        tcp->val.src_port &= tcp->mask.src_port;
        tcp->val.dst_port &= tcp->mask.dst_port;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const uint8_t *mask;
        unsigned int i;

        /* item->last and item->mask cannot exist without item->spec. */
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "\"mask\" or \"last\" field provided without a"
                         " corresponding \"spec\"");
        /* No spec, no mask, no problem. */
        if (!item->spec)
                return 0;
        mask = item->mask ?
                (const uint8_t *)item->mask :
                (const uint8_t *)proc->mask_default;
        assert(mask);
        /*
         * Single-pass check to make sure that:
         * - Mask is supported, no bits are set outside proc->mask_support.
         * - Both item->spec and item->last are included in mask.
         */
        for (i = 0; i != proc->mask_sz; ++i) {
                if (!mask[i])
                        continue;
                if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
                    ((const uint8_t *)proc->mask_support)[i])
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 item, "unsupported field found in \"mask\"");
                if (item->last &&
                    (((const uint8_t *)item->spec)[i] & mask[i]) !=
                    (((const uint8_t *)item->last)[i] & mask[i]))
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 item,
                                 "range between \"spec\" and \"last\""
                                 " is larger than \"mask\"");
        }
        return 0;
}

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
                                       RTE_FLOW_ITEM_TYPE_IPV4),
                .mask_support = &(const struct rte_flow_item_eth){
                        /* Only destination MAC can be matched. */
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                },
                .mask_default = &rte_flow_item_eth_mask,
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .merge = mlx4_flow_merge_eth,
                .dst_sz = sizeof(struct ibv_flow_spec_eth),
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
                .mask_support = &(const struct rte_flow_item_vlan){
                        /* Only TCI VID matching is supported. */
                        .tci = RTE_BE16(0x0fff),
                },
                .mask_default = &rte_flow_item_vlan_mask,
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .merge = mlx4_flow_merge_vlan,
                .dst_sz = 0,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
                                       RTE_FLOW_ITEM_TYPE_TCP),
                .mask_support = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = RTE_BE32(0xffffffff),
                                .dst_addr = RTE_BE32(0xffffffff),
                        },
                },
                .mask_default = &rte_flow_item_ipv4_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .merge = mlx4_flow_merge_ipv4,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .mask_support = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .mask_default = &rte_flow_item_udp_mask,
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .merge = mlx4_flow_merge_udp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .mask_support = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .mask_default = &rte_flow_item_tcp_mask,
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .merge = mlx4_flow_merge_tcp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
};
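
/*
 * Summary of the graph above (added commentary): valid pattern chains are
 * ETH, ETH/VLAN, ETH/IPV4 and ETH/VLAN/IPV4, each optionally followed by
 * UDP or TCP when ending in IPV4; UDP and TCP are terminal since they
 * declare no next_item.
 */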

/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct rte_flow **addr)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *action;
        const struct mlx4_flow_proc_item *proc;
        struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
        struct rte_flow *flow = &temp;
        const char *msg = NULL;
        int overlap;

        if (attr->group)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                         NULL, "groups are not supported");
        if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                         NULL, "maximum priority level is "
                         MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
        if (attr->egress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                         NULL, "egress is not supported");
        if (attr->transfer)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                         NULL, "transfer is not supported");
        if (!attr->ingress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                         NULL, "only ingress is supported");
fill:
        overlap = 0;
        proc = mlx4_flow_proc_item_list;
        flow->priority = attr->priority;
        /* Go over pattern. */
        for (item = pattern; item->type; ++item) {
                const struct mlx4_flow_proc_item *next = NULL;
                unsigned int i;
                int err;

                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
                        flow->internal = 1;
                        continue;
                }
                if (flow->promisc || flow->allmulti) {
                        msg = "mlx4 does not support additional matching"
                                " criteria combined with indiscriminate"
                                " matching on Ethernet headers";
                        goto exit_item_not_supported;
                }
                for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
                        if (proc->next_item[i] == item->type) {
                                next = &mlx4_flow_proc_item_list[item->type];
                                break;
                        }
                }
                if (!next)
                        goto exit_item_not_supported;
                proc = next;
                /*
                 * Perform basic sanity checks only once, while handle is
                 * not allocated.
                 */
                if (flow == &temp) {
                        err = mlx4_flow_item_check(item, proc, error);
                        if (err)
                                return err;
                }
                if (proc->merge) {
                        err = proc->merge(flow, item, proc, error);
                        if (err)
                                return err;
                }
                flow->ibv_attr_size += proc->dst_sz;
        }
        /* Go over actions list. */
        for (action = actions; action->type; ++action) {
                /* This one may appear anywhere multiple times. */
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                /* Fate-deciding actions may appear exactly once. */
                if (overlap) {
                        msg = "cannot combine several fate-deciding actions,"
                                " choose between DROP, QUEUE or RSS";
                        goto exit_action_not_supported;
                }
                overlap = 1;
                switch (action->type) {
                        const struct rte_flow_action_queue *queue;
                        const struct rte_flow_action_rss *rss;
                        const uint8_t *rss_key;
                        uint32_t rss_key_len;
                        uint64_t fields;
                        unsigned int i;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        flow->drop = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        if (flow->rss)
                                break;
                        queue = action->conf;
                        if (queue->index >= priv->dev->data->nb_rx_queues) {
                                msg = "queue target index beyond number of"
                                        " configured Rx queues";
                                goto exit_action_not_supported;
                        }
                        flow->rss = mlx4_rss_get
                                (priv, 0, mlx4_rss_hash_key_default, 1,
                                 &queue->index);
                        if (!flow->rss) {
                                msg = "not enough resources for additional"
                                        " single-queue RSS context";
                                goto exit_action_not_supported;
                        }
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        if (flow->rss)
                                break;
                        rss = action->conf;
                        /* Default RSS configuration if none is provided. */
                        if (rss->key_len) {
                                rss_key = rss->key;
                                rss_key_len = rss->key_len;
                        } else {
                                rss_key = mlx4_rss_hash_key_default;
                                rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
                        }
                        /* Sanity checks. */
                        for (i = 0; i < rss->queue_num; ++i)
                                if (rss->queue[i] >=
                                    priv->dev->data->nb_rx_queues)
                                        break;
                        if (i != rss->queue_num) {
                                msg = "queue index target beyond number of"
                                        " configured Rx queues";
                                goto exit_action_not_supported;
                        }
                        if (!rte_is_power_of_2(rss->queue_num)) {
                                msg = "for RSS, mlx4 requires the number of"
                                        " queues to be a power of two";
                                goto exit_action_not_supported;
                        }
                        if (rss_key_len != sizeof(flow->rss->key)) {
                                msg = "mlx4 supports exactly one RSS hash key"
                                        " length: "
                                        MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
                                goto exit_action_not_supported;
                        }
                        for (i = 1; i < rss->queue_num; ++i)
                                if (rss->queue[i] - rss->queue[i - 1] != 1)
                                        break;
                        if (i != rss->queue_num) {
                                msg = "mlx4 requires RSS contexts to use"
                                        " consecutive queue indices only";
                                goto exit_action_not_supported;
                        }
                        if (rss->queue[0] % rss->queue_num) {
                                msg = "mlx4 requires the first queue of a RSS"
                                        " context to be aligned on a multiple"
                                        " of the context size";
                                goto exit_action_not_supported;
                        }
                        if (rss->func &&
                            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
                                msg = "the only supported RSS hash function"
                                        " is Toeplitz";
                                goto exit_action_not_supported;
                        }
                        if (rss->level) {
                                msg = "a nonzero RSS encapsulation level is"
                                        " not supported";
                                goto exit_action_not_supported;
                        }
                        rte_errno = 0;
                        fields = mlx4_conv_rss_types(priv, rss->types);
                        if (fields == (uint64_t)-1 && rte_errno) {
                                msg = "unsupported RSS hash type requested";
                                goto exit_action_not_supported;
                        }
                        flow->rss = mlx4_rss_get
                                (priv, fields, rss_key, rss->queue_num,
                                 rss->queue);
                        if (!flow->rss) {
                                msg = "either invalid parameters or not enough"
                                        " resources for additional multi-queue"
                                        " RSS context";
                                goto exit_action_not_supported;
                        }
                        break;
                default:
                        goto exit_action_not_supported;
                }
        }
        /* When fate is unknown, drop traffic. */
        if (!overlap)
                flow->drop = 1;
        /* Validation ends here. */
        if (!addr) {
                if (flow->rss)
                        mlx4_rss_put(flow->rss);
                return 0;
        }
        if (flow == &temp) {
                /* Allocate proper handle based on collected data. */
                const struct mlx4_malloc_vec vec[] = {
                        {
                                .align = alignof(struct rte_flow),
                                .size = sizeof(*flow),
                                .addr = (void **)&flow,
                        },
                        {
                                .align = alignof(struct ibv_flow_attr),
                                .size = temp.ibv_attr_size,
                                .addr = (void **)&temp.ibv_attr,
                        },
                };
                if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
                        if (temp.rss)
                                mlx4_rss_put(temp.rss);
                        return rte_flow_error_set
                                (error, rte_errno,
                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                 "flow rule handle allocation failure");
                }
                /* Most fields will be updated by second pass. */
                *flow = (struct rte_flow){
                        .ibv_attr = temp.ibv_attr,
                        .ibv_attr_size = sizeof(*flow->ibv_attr),
                        .rss = temp.rss,
                };
                *flow->ibv_attr = (struct ibv_flow_attr){
                        .type = IBV_FLOW_ATTR_NORMAL,
                        .size = sizeof(*flow->ibv_attr),
                        .priority = attr->priority,
                        .port = priv->port,
                };
                goto fill;
        }
        *addr = flow;
        return 0;
exit_item_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg ? msg : "item not supported");
exit_action_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                  action, msg ? msg : "action not supported");
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
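
/*
 * Illustrative sketch (added commentary, assumed application-side usage): a
 * minimal rule this PMD accepts, steering all IPv4 traffic to queue 0:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */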

/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct priv *priv)
{
        struct mlx4_drop *drop = priv->drop;

        if (drop) {
                assert(drop->refcnt);
                assert(drop->priv == priv);
                ++drop->refcnt;
                return drop;
        }
        drop = rte_malloc(__func__, sizeof(*drop), 0);
        if (!drop)
                goto error;
        *drop = (struct mlx4_drop){
                .priv = priv,
                .refcnt = 1,
        };
        drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!drop->cq)
                goto error;
        drop->qp = mlx4_glue->create_qp
                (priv->pd,
                 &(struct ibv_qp_init_attr){
                        .send_cq = drop->cq,
                        .recv_cq = drop->cq,
                        .qp_type = IBV_QPT_RAW_PACKET,
                 });
        if (!drop->qp)
                goto error;
        priv->drop = drop;
        return drop;
error:
        if (drop) {
                if (drop->qp)
                        claim_zero(mlx4_glue->destroy_qp(drop->qp));
                if (drop->cq)
                        claim_zero(mlx4_glue->destroy_cq(drop->cq));
                rte_free(drop);
        }
        rte_errno = ENOMEM;
        return NULL;
}

/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
        assert(drop->refcnt);
        if (--drop->refcnt)
                return;
        drop->priv->drop = NULL;
        claim_zero(mlx4_glue->destroy_qp(drop->qp));
        claim_zero(mlx4_glue->destroy_cq(drop->cq));
        rte_free(drop);
}

/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct priv *priv,
                 struct rte_flow *flow,
                 int enable,
                 struct rte_flow_error *error)
{
        struct ibv_qp *qp = NULL;
        const char *msg;
        int err;

        if (!enable) {
                if (!flow->ibv_flow)
                        return 0;
                claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
                flow->ibv_flow = NULL;
                if (flow->drop)
                        mlx4_drop_put(priv->drop);
                else if (flow->rss)
                        mlx4_rss_detach(flow->rss);
                return 0;
        }
        assert(flow->ibv_attr);
        if (!flow->internal &&
            !priv->isolated &&
            flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
                if (flow->ibv_flow) {
                        claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
                        flow->ibv_flow = NULL;
                        if (flow->drop)
                                mlx4_drop_put(priv->drop);
                        else if (flow->rss)
                                mlx4_rss_detach(flow->rss);
                }
                err = EACCES;
                msg = ("priority level "
                       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
                       " is reserved when not in isolated mode");
                goto error;
        }
        if (flow->rss) {
                struct mlx4_rss *rss = flow->rss;
                int missing = 0;
                unsigned int i;

                /* Stop at the first nonexistent target queue. */
                for (i = 0; i != rss->queues; ++i)
                        if (rss->queue_id[i] >=
                            priv->dev->data->nb_rx_queues ||
                            !priv->dev->data->rx_queues[rss->queue_id[i]]) {
                                missing = 1;
                                break;
                        }
                if (flow->ibv_flow) {
                        if (missing ^ !flow->drop)
                                return 0;
                        /* Verbs flow needs updating. */
                        claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
                        flow->ibv_flow = NULL;
                        if (flow->drop)
                                mlx4_drop_put(priv->drop);
                        else
                                mlx4_rss_detach(rss);
                }
                if (!missing) {
                        err = mlx4_rss_attach(rss);
                        if (err) {
                                err = -err;
                                msg = "cannot create indirection table or hash"
                                        " QP to associate flow rule with";
                                goto error;
                        }
                        qp = rss->qp;
                }
                /* A missing target queue drops traffic implicitly. */
                flow->drop = missing;
        }
        if (flow->drop) {
                if (flow->ibv_flow)
                        return 0;
                mlx4_drop_get(priv);
                if (!priv->drop) {
                        err = rte_errno;
                        msg = "resources for drop flow rule cannot be created";
                        goto error;
                }
                qp = priv->drop->qp;
        }
        assert(qp);
        if (flow->ibv_flow)
                return 0;
        flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
        if (flow->ibv_flow)
                return 0;
        if (flow->drop)
                mlx4_drop_put(priv->drop);
        else if (flow->rss)
                mlx4_rss_detach(flow->rss);
        err = errno;
        msg = "flow rule rejected by device";
error:
        return rte_flow_error_set
                (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
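
/*
 * Added note: when enabling, the function above picks the Verbs target in
 * this order: the RSS context QP when all target queues exist, otherwise
 * the shared drop QP. A rule whose queues have disappeared therefore keeps
 * its handle but silently drops traffic until a later mlx4_flow_sync()
 * toggles it again.
 */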

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
        int err;

        err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
        if (err)
                return NULL;
        err = mlx4_flow_toggle(priv, flow, priv->started, error);
        if (!err) {
                struct rte_flow *curr = LIST_FIRST(&priv->flows);

                /* New rules are inserted after internal ones. */
                if (!curr || !curr->internal) {
                        LIST_INSERT_HEAD(&priv->flows, flow, next);
                } else {
                        while (LIST_NEXT(curr, next) &&
                               LIST_NEXT(curr, next)->internal)
                                curr = LIST_NEXT(curr, next);
                        LIST_INSERT_AFTER(curr, flow, next);
                }
                return flow;
        }
        if (flow->rss)
                mlx4_rss_put(flow->rss);
        rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                           error->message);
        rte_free(flow);
        return NULL;
}

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        if (!!enable == !!priv->isolated)
                return 0;
        priv->isolated = !!enable;
        if (mlx4_flow_sync(priv, error)) {
                priv->isolated = !enable;
                return -rte_errno;
        }
        return 0;
}

/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        int err = mlx4_flow_toggle(priv, flow, 0, error);

        if (err)
                return err;
        LIST_REMOVE(flow, next);
        if (flow->rss)
                mlx4_rss_put(flow->rss);
        rte_free(flow);
        return 0;
}

/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow = LIST_FIRST(&priv->flows);

        while (flow) {
                struct rte_flow *next = LIST_NEXT(flow, next);

                if (!flow->internal)
                        mlx4_flow_destroy(dev, flow, error);
                flow = next;
        }
        return 0;
}

/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
{
        while (vlan < 4096) {
                if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
                    (UINT64_C(1) << (vlan % 64)))
                        return vlan;
                ++vlan;
        }
        return vlan;
}
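
/*
 * Worked example for the bitmap lookup above (added commentary): VLAN 100
 * lives in ids[100 / 64] == ids[1] at bit 100 % 64 == 36, so the test reads
 * ids[1] & (UINT64_C(1) << 36).
 */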

/**
 * Generate internal flow rules.
 *
 * Various flow rules are created depending on the mode the device is in:
 *
 * 1. Promiscuous:
 *       port MAC + broadcast + catch-all (VLAN filtering is ignored).
 * 2. All multicast:
 *       port MAC/VLAN + broadcast + catch-all multicast.
 * 3. Otherwise:
 *       port MAC/VLAN + broadcast MAC/VLAN.
 *
 * About MAC flow rules:
 *
 * - MAC flow rules are generated from @p dev->data->mac_addrs
 *   (@p priv->mac array).
 * - An additional flow rule for Ethernet broadcasts is also generated.
 * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
 *   is enabled and VLAN filters are configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = {
                .priority = MLX4_FLOW_PRIORITY_LAST,
                .ingress = 1,
        };
        struct rte_flow_item_eth eth_spec;
        const struct rte_flow_item_eth eth_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const struct rte_flow_item_eth eth_allmulti = {
                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_vlan vlan_spec;
        const struct rte_flow_item_vlan vlan_mask = {
                .tci = RTE_BE16(0x0fff),
        };
        struct rte_flow_item pattern[] = {
                {
                        .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .spec = &eth_spec,
                        .mask = &eth_mask,
                },
                {
                        /* Replaced with VLAN if filtering is enabled. */
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
        };
        /*
         * Round number of queues down to their previous power of 2 to
         * comply with RSS context limitations. Extra queues silently do not
         * get RSS by default.
         */
        uint32_t queues =
                rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
        uint16_t queue[queues];
        struct rte_flow_action_rss action_rss = {
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .level = 0,
                .types = -1,
                .key_len = MLX4_RSS_HASH_KEY_SIZE,
                .queue_num = queues,
                .key = mlx4_rss_hash_key_default,
                .queue = queue,
        };
        struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_RSS,
                        .conf = &action_rss,
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                },
        };
        struct ether_addr *rule_mac = &eth_spec.dst;
        rte_be16_t *rule_vlan =
                (priv->dev->data->dev_conf.rxmode.offloads &
                 DEV_RX_OFFLOAD_VLAN_FILTER) &&
                !priv->dev->data->promiscuous ?
                &vlan_spec.tci :
                NULL;
        uint16_t vlan = 0;
        struct rte_flow *flow;
        unsigned int i;
        int err = 0;

        /* Nothing to be done if there are no Rx queues. */
        if (!queues)
                goto error;
        /* Prepare default RSS configuration. */
        for (i = 0; i != queues; ++i)
                queue[i] = i;
        /*
         * Set up VLAN item if filtering is enabled and at least one VLAN
         * filter is configured.
         */
        if (rule_vlan) {
                vlan = mlx4_flow_internal_next_vlan(priv, 0);
                if (vlan < 4096) {
                        pattern[2] = (struct rte_flow_item){
                                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                                .spec = &vlan_spec,
                                .mask = &vlan_mask,
                        };
next_vlan:
                        *rule_vlan = rte_cpu_to_be_16(vlan);
                } else {
                        rule_vlan = NULL;
                }
        }
        for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
                const struct ether_addr *mac;

                /* Broadcasts are handled by an extra iteration. */
                if (i < RTE_DIM(priv->mac))
                        mac = &priv->mac[i];
                else
                        mac = &eth_mask.dst;
                if (is_zero_ether_addr(mac))
                        continue;
                /* Check if MAC flow rule is already present. */
                for (flow = LIST_FIRST(&priv->flows);
                     flow && flow->internal;
                     flow = LIST_NEXT(flow, next)) {
                        const struct ibv_flow_spec_eth *eth =
                                (const void *)((uintptr_t)flow->ibv_attr +
                                               sizeof(*flow->ibv_attr));
                        unsigned int j;

                        if (!flow->mac)
                                continue;
                        assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
                        assert(flow->ibv_attr->num_of_specs == 1);
                        assert(eth->type == IBV_FLOW_SPEC_ETH);
                        assert(flow->rss);
                        if (rule_vlan &&
                            (eth->val.vlan_tag != *rule_vlan ||
                             eth->mask.vlan_tag != RTE_BE16(0x0fff)))
                                continue;
                        if (!rule_vlan && eth->mask.vlan_tag)
                                continue;
                        for (j = 0; j != sizeof(mac->addr_bytes); ++j)
                                if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
                                    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
                                    eth->val.src_mac[j] != UINT8_C(0x00) ||
                                    eth->mask.src_mac[j] != UINT8_C(0x00))
                                        break;
                        if (j != sizeof(mac->addr_bytes))
                                continue;
                        if (flow->rss->queues != queues ||
                            memcmp(flow->rss->queue_id, action_rss.queue,
                                   queues * sizeof(flow->rss->queue_id[0])))
                                continue;
                        break;
                }
                if (!flow || !flow->internal) {
                        /* Not found, create a new flow rule. */
                        memcpy(rule_mac, mac, sizeof(*mac));
                        flow = mlx4_flow_create(priv->dev, &attr, pattern,
                                                actions, error);
                        if (!flow) {
                                err = -rte_errno;
                                goto error;
                        }
                }
                flow->select = 1;
                flow->mac = 1;
        }
        if (rule_vlan) {
                vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
                if (vlan < 4096)
                        goto next_vlan;
        }
        /* Take care of promiscuous and all multicast flow rules. */
        if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
                for (flow = LIST_FIRST(&priv->flows);
                     flow && flow->internal;
                     flow = LIST_NEXT(flow, next)) {
                        if (priv->dev->data->promiscuous) {
                                if (flow->promisc)
                                        break;
                        } else {
                                assert(priv->dev->data->all_multicast);
                                if (flow->allmulti)
                                        break;
                        }
                }
                if (flow && flow->internal) {
                        assert(flow->rss);
                        if (flow->rss->queues != queues ||
                            memcmp(flow->rss->queue_id, action_rss.queue,
                                   queues * sizeof(flow->rss->queue_id[0])))
                                flow = NULL;
                }
                if (!flow || !flow->internal) {
                        /* Not found, create a new flow rule. */
                        if (priv->dev->data->promiscuous) {
                                pattern[1].spec = NULL;
                                pattern[1].mask = NULL;
                        } else {
                                assert(priv->dev->data->all_multicast);
                                pattern[1].spec = &eth_allmulti;
                                pattern[1].mask = &eth_allmulti;
                        }
                        pattern[2] = pattern[3];
                        flow = mlx4_flow_create(priv->dev, &attr, pattern,
                                                actions, error);
                        if (!flow) {
                                err = -rte_errno;
                                goto error;
                        }
                }
                assert(flow->promisc || flow->allmulti);
                flow->select = 1;
        }
error:
        /* Clear selection and clean up stale internal flow rules. */
        flow = LIST_FIRST(&priv->flows);
        while (flow && flow->internal) {
                struct rte_flow *next = LIST_NEXT(flow, next);

                if (!flow->select)
                        claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
                else
                        flow->select = 0;
                flow = next;
        }
        return err;
}

/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int ret;

        /* Internal flow rules are guaranteed to come first in the list. */
        if (priv->isolated) {
                /*
                 * Get rid of them in isolated mode, stop at the first
                 * non-internal rule found.
                 */
                for (flow = LIST_FIRST(&priv->flows);
                     flow && flow->internal;
                     flow = LIST_FIRST(&priv->flows))
                        claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
        } else {
                /* Refresh internal rules. */
                ret = mlx4_flow_internal(priv, error);
                if (ret)
                        return ret;
        }
        /* Toggle the remaining flow rules. */
        LIST_FOREACH(flow, &priv->flows, next) {
                ret = mlx4_flow_toggle(priv, flow, priv->started, error);
                if (ret)
                        return ret;
        }
        if (!priv->started)
                assert(!priv->drop);
        return 0;
}

/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct priv *priv)
{
        struct rte_flow *flow;

        while ((flow = LIST_FIRST(&priv->flows)))
                mlx4_flow_destroy(priv->dev, flow, NULL);
        assert(LIST_EMPTY(&priv->rss));
}

static const struct rte_flow_ops mlx4_flow_ops = {
        .validate = mlx4_flow_validate,
        .create = mlx4_flow_create,
        .destroy = mlx4_flow_destroy,
        .flush = mlx4_flow_flush,
        .isolate = mlx4_flow_isolate,
};

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
                 enum rte_filter_type filter_type,
                 enum rte_filter_op filter_op,
                 void *arg)
{
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        break;
                *(const void **)arg = &mlx4_flow_ops;
                return 0;
        default:
                ERROR("%p: filter type (%d) not supported",
                      (void *)dev, filter_type);
                break;
        }
        rte_errno = ENOTSUP;
        return -rte_errno;
}
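
/*
 * Illustrative sketch (added commentary, assumed caller-side usage): the
 * generic filter hook above only hands out the flow ops table, which the
 * rte_flow API does internally on every call:
 *
 *   const struct rte_flow_ops *ops;
 *
 *   if (!mlx4_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *                         RTE_ETH_FILTER_GET, &ops))
 *           ret = ops->validate(dev, attr, pattern, actions, error);
 */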