/*-
 *   BSD LICENSE
 *
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}

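/*
 * For example, NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to a compound literal equivalent to:
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * The unconditional END terminator lets users of next_item iterate without
 * a separate length field.
 */
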
/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
	/** Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/** Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/** Size in bytes for @p mask_support and @p mask_default. */
	const unsigned int mask_sz;
	/** Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};

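/*
 * Note on the merge()/dst_sz contract: mlx4_flow_prepare() runs the pattern
 * twice. On the first (validation) pass, flow->ibv_attr is still NULL, so
 * merge() only checks constraints while dst_sz accumulates into
 * flow->ibv_attr_size to size the final allocation. On the second pass,
 * entered once the handle is allocated, merge() writes a Verbs spec of
 * dst_sz bytes at the current offset.
 */
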
/** Shared resources for drop flow rules. */
struct mlx4_drop {
	struct ibv_qp *qp; /**< QP target. */
	struct ibv_cq *cq; /**< CQ associated with above QP. */
	struct priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count. */
};

/**
 * Convert DPDK RSS hash fields to their Verbs equivalent.
 *
 * This function returns the supported (default) set when @p rss_hf has
 * special value (uint64_t)-1.
 *
 * @param rss_hf
 *   Hash fields in DPDK format (see struct rte_eth_rss_conf).
 *
 * @return
 *   A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
 *   otherwise and rte_errno is set.
 */
static uint64_t
mlx4_conv_rss_hf(uint64_t rss_hf)
{
	enum { IPV4, IPV6, TCP, UDP, };
	const uint64_t in[] = {
		[IPV4] = (ETH_RSS_IPV4 |
			  ETH_RSS_FRAG_IPV4 |
			  ETH_RSS_NONFRAG_IPV4_TCP |
			  ETH_RSS_NONFRAG_IPV4_UDP |
			  ETH_RSS_NONFRAG_IPV4_OTHER),
		[IPV6] = (ETH_RSS_IPV6 |
			  ETH_RSS_FRAG_IPV6 |
			  ETH_RSS_NONFRAG_IPV6_TCP |
			  ETH_RSS_NONFRAG_IPV6_UDP |
			  ETH_RSS_NONFRAG_IPV6_OTHER |
			  ETH_RSS_IPV6_EX |
			  ETH_RSS_IPV6_TCP_EX |
			  ETH_RSS_IPV6_UDP_EX),
		[TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
			 ETH_RSS_NONFRAG_IPV6_TCP |
			 ETH_RSS_IPV6_TCP_EX),
		/*
		 * UDP support is temporarily disabled due to an
		 * implementation issue in the kernel.
		 */
		[UDP] = 0,
	};
	const uint64_t out[RTE_DIM(in)] = {
		[IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
		[IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
		[TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
		[UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
	};
	uint64_t seen = 0;
	uint64_t conv = 0;
	unsigned int i;

	for (i = 0; i != RTE_DIM(in); ++i)
		if (rss_hf & in[i]) {
			seen |= rss_hf & in[i];
			conv |= out[i];
		}
	if (rss_hf == (uint64_t)-1)
		return conv;
	if (!(rss_hf & ~seen))
		return conv;
	rte_errno = ENOTSUP;
	return (uint64_t)-1;
}

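/*
 * Example: rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP selects the
 * IPV4 and TCP rows above and yields IBV_RX_HASH_SRC_IPV4 |
 * IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP |
 * IBV_RX_HASH_DST_PORT_TCP, while any requested bit not covered by in[]
 * (e.g. ETH_RSS_L2_PAYLOAD) makes the whole conversion fail with ENOTSUP,
 * unless the special (uint64_t)-1 value asked for the default set.
 */
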
/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;
	unsigned int i;

	if (!mask) {
		flow->promisc = 1;
	} else {
		uint32_t sum_dst = 0;
		uint32_t sum_src = 0;

		for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
			sum_dst += mask->dst.addr_bytes[i];
			sum_src += mask->src.addr_bytes[i];
		}
		if (sum_src) {
			msg = "mlx4 does not support source MAC matching";
			goto error;
		} else if (!sum_dst) {
			flow->promisc = 1;
		} else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
			if (!(spec->dst.addr_bytes[0] & 1)) {
				msg = "mlx4 does not support the explicit"
					" exclusion of all multicast traffic";
				goto error;
			}
			flow->allmulti = 1;
		} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
			msg = "mlx4 does not support matching partial"
				" Ethernet fields";
			goto error;
		}
	}
	if (!flow->ibv_attr)
		return 0;
	if (flow->promisc) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (flow->allmulti) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
		return 0;
	}
	++flow->ibv_attr->num_of_specs;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*eth),
	};
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
	}
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

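/*
 * Summary of mlx4_flow_merge_eth() outcomes: an empty or missing mask turns
 * the rule into a promiscuous-like one (IBV_FLOW_ATTR_ALL_DEFAULT), the
 * 01:00:00:00:00:00 destination mask turns it into a catch-all multicast
 * rule (IBV_FLOW_ATTR_MC_DEFAULT), and only a full ff:ff:ff:ff:ff:ff
 * destination mask results in a regular IBV_FLOW_SPEC_ETH entry.
 */
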
/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;

	if (!mask || !mask->tci) {
		msg = "mlx4 cannot match all VLAN traffic while excluding"
			" non-VLAN traffic, TCI VID must be specified";
		goto error;
	}
	if (mask->tci != RTE_BE16(0x0fff)) {
		msg = "mlx4 does not support partial TCI VID matching";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
		       sizeof(*eth));
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

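/*
 * Unlike the other merge callbacks, this one does not append a new spec:
 * Verbs conveys VLAN matching through the vlan_tag fields of the Ethernet
 * spec written by mlx4_flow_merge_eth(). This is why the eth pointer above
 * is computed backwards from ibv_attr_size and why the VLAN entry in
 * mlx4_flow_proc_item_list contributes no dst_sz of its own.
 */
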
/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_ipv4 *ipv4;
	const char *msg;

	if (mask &&
	    ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
	     (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
		msg = "mlx4 does not support matching partial IPv4 fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = sizeof(*ipv4),
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

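/*
 * The (uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) test above is a
 * compact full-or-empty check: adding 1 maps 0xffffffff to 0 and 0x00000000
 * to 1, so any other (partial) mask value exceeds 1 and is rejected. The
 * UDP and TCP merge callbacks below use the same trick on 16-bit ports.
 */
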
/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *udp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial UDP fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(*udp),
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *tcp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial TCP fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(*tcp),
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const uint8_t *mask;
	unsigned int i;

	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
	/* No spec, no mask, no problem. */
	if (!item->spec)
		return 0;
	mask = item->mask ?
		(const uint8_t *)item->mask :
		(const uint8_t *)proc->mask_default;
	assert(mask);
	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (!mask[i])
			continue;
		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
		    ((const uint8_t *)proc->mask_support)[i])
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported field found in \"mask\"");
		if (item->last &&
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i]))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item,
				 "range between \"spec\" and \"last\""
				 " is larger than \"mask\"");
	}
	return 0;
}

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_eth){
			/* Only destination MAC can be matched. */
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.mask_default = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.merge = mlx4_flow_merge_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_vlan){
			/* Only TCI VID matching is supported. */
			.tci = RTE_BE16(0x0fff),
		},
		.mask_default = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.merge = mlx4_flow_merge_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = mlx4_flow_merge_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = mlx4_flow_merge_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = mlx4_flow_merge_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};

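/*
 * The resulting item graph is: END -> ETH -> { VLAN | IPV4 },
 * VLAN -> IPV4, IPV4 -> { UDP | TCP }. UDP and TCP have no next_item and
 * therefore terminate the pattern; any other sequence is rejected by
 * mlx4_flow_prepare() as an unsupported item.
 */
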
/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct rte_flow **addr)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc;
	struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
	struct rte_flow *flow = &temp;
	const char *msg = NULL;

	if (attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			 NULL, "groups are not supported");
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			 NULL, "maximum priority level is "
			 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
	if (attr->egress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			 NULL, "egress is not supported");
	if (!attr->ingress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			 NULL, "only ingress is supported");
fill:
	proc = mlx4_flow_proc_item_list;
	/* Go over pattern. */
	for (item = pattern; item->type; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;
		unsigned int i;
		int err;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
			flow->internal = 1;
			continue;
		}
		if (flow->promisc || flow->allmulti) {
			msg = "mlx4 does not support additional matching"
				" criteria combined with indiscriminate"
				" matching on Ethernet headers";
			goto exit_item_not_supported;
		}
		for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
				break;
			}
		}
		if (!next)
			goto exit_item_not_supported;
		proc = next;
		/*
		 * Perform basic sanity checks only once, while handle is
		 * not allocated.
		 */
		if (flow == &temp) {
			err = mlx4_flow_item_check(item, proc, error);
			if (err)
				return err;
		}
		if (proc->merge) {
			err = proc->merge(flow, item, proc, error);
			if (err)
				return err;
		}
		flow->ibv_attr_size += proc->dst_sz;
	}
	/* Go over actions list. */
	for (action = actions; action->type; ++action) {
		switch (action->type) {
			const struct rte_flow_action_queue *queue;
			const struct rte_flow_action_rss *rss;
			const struct rte_eth_rss_conf *rss_conf;
			unsigned int i;

		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow->drop = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			if (flow->rss)
				break;
			queue = action->conf;
			if (queue->index >= priv->dev->data->nb_rx_queues) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, 0, mlx4_rss_hash_key_default, 1,
				 &queue->index);
			if (!flow->rss) {
				msg = "not enough resources for additional"
					" single-queue RSS context";
				goto exit_action_not_supported;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			if (flow->rss)
				break;
			rss = action->conf;
			/* Default RSS configuration if none is provided. */
			rss_conf =
				rss->rss_conf ?
				rss->rss_conf :
				&(struct rte_eth_rss_conf){
					.rss_key = mlx4_rss_hash_key_default,
					.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
					.rss_hf = (uint64_t)-1,
				};
			/* Sanity checks. */
			for (i = 0; i < rss->num; ++i)
				if (rss->queue[i] >=
				    priv->dev->data->nb_rx_queues)
					break;
			if (i != rss->num) {
				msg = "queue index target beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			if (!rte_is_power_of_2(rss->num)) {
				msg = "for RSS, mlx4 requires the number of"
					" queues to be a power of two";
				goto exit_action_not_supported;
			}
			if (rss_conf->rss_key_len !=
			    sizeof(flow->rss->key)) {
				msg = "mlx4 supports exactly one RSS hash key"
					" length: "
					MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
				goto exit_action_not_supported;
			}
			for (i = 1; i < rss->num; ++i)
				if (rss->queue[i] - rss->queue[i - 1] != 1)
					break;
			if (i != rss->num) {
				msg = "mlx4 requires RSS contexts to use"
					" consecutive queue indices only";
				goto exit_action_not_supported;
			}
			if (rss->queue[0] % rss->num) {
				msg = "mlx4 requires the first queue of a RSS"
					" context to be aligned on a multiple"
					" of the context size";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, mlx4_conv_rss_hf(rss_conf->rss_hf),
				 rss_conf->rss_key, rss->num, rss->queue);
			if (!flow->rss) {
				msg = "either invalid parameters or not enough"
					" resources for additional multi-queue"
					" RSS context";
				goto exit_action_not_supported;
			}
			break;
		default:
			goto exit_action_not_supported;
		}
	}
	if (!flow->rss && !flow->drop)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "no valid action");
	/* Validation ends here. */
	if (!addr) {
		if (flow->rss)
			mlx4_rss_put(flow->rss);
		return 0;
	}
	if (flow == &temp) {
		/* Allocate proper handle based on collected data. */
		const struct mlx4_malloc_vec vec[] = {
			{
				.align = alignof(struct rte_flow),
				.size = sizeof(*flow),
				.addr = (void **)&flow,
			},
			{
				.align = alignof(struct ibv_flow_attr),
				.size = temp.ibv_attr_size,
				.addr = (void **)&temp.ibv_attr,
			},
		};
		if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
			return rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "flow rule handle allocation failure");
		/* Most fields will be updated by second pass. */
		*flow = (struct rte_flow){
			.ibv_attr = temp.ibv_attr,
			.ibv_attr_size = sizeof(*flow->ibv_attr),
			.rss = temp.rss,
		};
		*flow->ibv_attr = (struct ibv_flow_attr){
			.type = IBV_FLOW_ATTR_NORMAL,
			.size = sizeof(*flow->ibv_attr),
			.priority = attr->priority,
			.port = priv->port,
		};
		goto fill;
	}
	*addr = flow;
	return 0;
exit_item_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg ? msg : "item not supported");
exit_action_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  action, msg ? msg : "action not supported");
}

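/*
 * Note that mlx4_flow_prepare() is effectively a two-pass function thanks
 * to the "goto fill" above: the first pass validates pattern and actions
 * against the stack-allocated &temp while accumulating the required Verbs
 * attribute size, and the second pass, entered once the handle is
 * allocated, actually merges the specs into flow->ibv_attr.
 */
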
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}

/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct priv *priv)
{
	struct mlx4_drop *drop = priv->drop;

	if (drop) {
		assert(drop->refcnt);
		assert(drop->priv == priv);
		++drop->refcnt;
		return drop;
	}
	drop = rte_malloc(__func__, sizeof(*drop), 0);
	if (!drop)
		goto error;
	*drop = (struct mlx4_drop){
		.priv = priv,
		.refcnt = 1,
	};
	drop->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!drop->cq)
		goto error;
	drop->qp = ibv_create_qp(priv->pd,
				 &(struct ibv_qp_init_attr){
					.send_cq = drop->cq,
					.recv_cq = drop->cq,
					.qp_type = IBV_QPT_RAW_PACKET,
				 });
	if (!drop->qp)
		goto error;
	priv->drop = drop;
	return drop;
error:
	/* Guard against partial initialization. */
	if (drop) {
		if (drop->qp)
			claim_zero(ibv_destroy_qp(drop->qp));
		if (drop->cq)
			claim_zero(ibv_destroy_cq(drop->cq));
		rte_free(drop);
	}
	rte_errno = ENOMEM;
	return NULL;
}

/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
	assert(drop->refcnt);
	if (--drop->refcnt)
		return;
	drop->priv->drop = NULL;
	claim_zero(ibv_destroy_qp(drop->qp));
	claim_zero(ibv_destroy_cq(drop->cq));
	rte_free(drop);
}

/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct priv *priv,
		 struct rte_flow *flow,
		 int enable,
		 struct rte_flow_error *error)
{
	struct ibv_qp *qp = NULL;
	const char *msg;
	int err;

	if (!enable) {
		if (!flow->ibv_flow)
			return 0;
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		if (flow->drop)
			mlx4_drop_put(priv->drop);
		else if (flow->rss)
			mlx4_rss_detach(flow->rss);
		return 0;
	}
	assert(flow->ibv_attr);
	if (!flow->internal &&
	    !priv->isolated &&
	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
		if (flow->ibv_flow) {
			claim_zero(ibv_destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else if (flow->rss)
				mlx4_rss_detach(flow->rss);
		}
		err = EACCES;
		msg = ("priority level "
		       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
		       " is reserved when not in isolated mode");
		goto error;
	}
	if (flow->rss) {
		struct mlx4_rss *rss = flow->rss;
		int missing = 0;
		unsigned int i;

		/* Stop at the first nonexistent target queue. */
		for (i = 0; i != rss->queues; ++i)
			if (rss->queue_id[i] >=
			    priv->dev->data->nb_rx_queues ||
			    !priv->dev->data->rx_queues[rss->queue_id[i]]) {
				missing = 1;
				break;
			}
		if (flow->ibv_flow) {
			if (missing ^ !flow->drop)
				return 0;
			/* Verbs flow needs updating. */
			claim_zero(ibv_destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else
				mlx4_rss_detach(rss);
		}
		if (!missing) {
			err = mlx4_rss_attach(rss);
			if (err) {
				err = -err;
				msg = "cannot create indirection table or hash"
					" QP to associate flow rule with";
				goto error;
			}
			qp = rss->qp;
		}
		/* A missing target queue drops traffic implicitly. */
		flow->drop = missing;
	}
	if (flow->drop) {
		mlx4_drop_get(priv);
		if (!priv->drop) {
			err = rte_errno;
			msg = "resources for drop flow rule cannot be created";
			goto error;
		}
		qp = priv->drop->qp;
	}
	assert(qp);
	if (flow->ibv_flow)
		return 0;
	flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
	if (flow->ibv_flow)
		return 0;
	if (flow->drop)
		mlx4_drop_put(priv->drop);
	else if (flow->rss)
		mlx4_rss_detach(flow->rss);
	err = errno;
	msg = "flow rule rejected by device";
error:
	return rte_flow_error_set
		(error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}

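/*
 * The "missing ^ !flow->drop" test above keeps an existing Verbs flow
 * only while its kind still matches the situation: a rule whose target
 * queues all exist must be an RSS flow, while a rule with a missing queue
 * must be a drop flow. Any mismatch forces the flow to be destroyed and
 * re-created against the appropriate QP.
 */
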
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int err;

	err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
	if (err)
		return NULL;
	err = mlx4_flow_toggle(priv, flow, priv->started, error);
	if (!err) {
		struct rte_flow *curr = LIST_FIRST(&priv->flows);

		/* New rules are inserted after internal ones. */
		if (!curr || !curr->internal) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			while (LIST_NEXT(curr, next) &&
			       LIST_NEXT(curr, next)->internal)
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
		return flow;
	}
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   error->message);
	rte_free(flow);
	return NULL;
}

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
		return 0;
	priv->isolated = !!enable;
	if (mlx4_flow_sync(priv, error)) {
		priv->isolated = !enable;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int err = mlx4_flow_toggle(priv, flow, 0, error);

	if (err)
		return err;
	LIST_REMOVE(flow, next);
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_free(flow);
	return 0;
}

/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow = LIST_FIRST(&priv->flows);

	while (flow) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->internal)
			mlx4_flow_destroy(dev, flow, error);
		flow = next;
	}
	return 0;
}

/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
{
	while (vlan < 4096) {
		if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
		    (UINT64_C(1) << (vlan % 64)))
			return vlan;
		++vlan;
	}
	return vlan;
}

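/*
 * vlan_filter_conf.ids[] is a 4096-bit bitmap stored as an array of
 * uint64_t, hence the ids[vlan / 64] word lookup and the (vlan % 64) bit
 * test above; e.g. VLAN ID 100 lives in bit 36 of ids[1].
 */
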
/**
 * Generate internal flow rules.
 *
 * Various flow rules are created depending on the mode the device is in:
 *
 * 1. Promiscuous: port MAC + catch-all (VLAN filtering is ignored).
 * 2. All multicast: port MAC/VLAN + catch-all multicast.
 * 3. Otherwise: port MAC/VLAN + broadcast MAC/VLAN.
 *
 * About MAC flow rules:
 *
 * - MAC flow rules are generated from @p dev->data->mac_addrs
 *   (@p priv->mac array).
 * - An additional flow rule for Ethernet broadcasts is also generated.
 * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
 *   is enabled and VLAN filters are configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = {
		.priority = MLX4_FLOW_PRIORITY_LAST,
		.ingress = 1,
	};
	struct rte_flow_item_eth eth_spec;
	const struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const struct rte_flow_item_eth eth_allmulti = {
		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_vlan vlan_spec;
	const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0x0fff),
	};
	struct rte_flow_item pattern[] = {
		{
			.type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &eth_spec,
			.mask = &eth_mask,
		},
		{
			/* Replaced with VLAN if filtering is enabled. */
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	/*
	 * Round number of queues down to their previous power of 2 to
	 * comply with RSS context limitations. Extra queues silently do not
	 * get RSS by default.
	 */
	uint32_t queues =
		rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
	alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
		[offsetof(struct rte_flow_action_rss, queue) +
		 sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
	struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = rss_conf,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct ether_addr *rule_mac = &eth_spec.dst;
	rte_be16_t *rule_vlan =
		priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
		!priv->dev->data->promiscuous ?
		&vlan_spec.tci :
		NULL;
	unsigned int broadcast =
		!priv->dev->data->promiscuous &&
		!priv->dev->data->all_multicast;
	uint16_t vlan = 0;
	struct rte_flow *flow;
	unsigned int i;
	int err = 0;

	/* Nothing to be done if there are no Rx queues. */
	if (!queues)
		goto error;
	/* Prepare default RSS configuration. */
	*rss_conf = (struct rte_flow_action_rss){
		.rss_conf = NULL, /* Rely on default fallback settings. */
		.num = queues,
	};
	for (i = 0; i != queues; ++i)
		rss_conf->queue[i] = i;
	/*
	 * Set up VLAN item if filtering is enabled and at least one VLAN
	 * filter is configured.
	 */
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, 0);
		if (vlan < 4096) {
			pattern[2] = (struct rte_flow_item){
				.type = RTE_FLOW_ITEM_TYPE_VLAN,
				.spec = &vlan_spec,
				.mask = &vlan_mask,
			};
next_vlan:
			*rule_vlan = rte_cpu_to_be_16(vlan);
		} else {
			rule_vlan = NULL;
		}
	}
	for (i = 0; i != RTE_DIM(priv->mac) + broadcast; ++i) {
		const struct ether_addr *mac;

		/* Broadcasts are handled by an extra iteration. */
		if (i < RTE_DIM(priv->mac))
			mac = &priv->mac[i];
		else
			mac = &eth_mask.dst;
		if (is_zero_ether_addr(mac))
			continue;
		/* Check if MAC flow rule is already present. */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			const struct ibv_flow_spec_eth *eth =
				(const void *)((uintptr_t)flow->ibv_attr +
					       sizeof(*flow->ibv_attr));
			unsigned int j;

			if (!flow->mac)
				continue;
			assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
			assert(flow->ibv_attr->num_of_specs == 1);
			assert(eth->type == IBV_FLOW_SPEC_ETH);
			assert(flow->rss);
			if (rule_vlan &&
			    (eth->val.vlan_tag != *rule_vlan ||
			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
				continue;
			if (!rule_vlan && eth->mask.vlan_tag)
				continue;
			for (j = 0; j != sizeof(mac->addr_bytes); ++j)
				if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
				    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
				    eth->val.src_mac[j] != UINT8_C(0x00) ||
				    eth->mask.src_mac[j] != UINT8_C(0x00))
					break;
			if (j != sizeof(mac->addr_bytes))
				continue;
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, rss_conf->queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				continue;
			break;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			memcpy(rule_mac, mac, sizeof(*mac));
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		flow->select = 1;
		flow->mac = 1;
	}
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
		if (vlan < 4096)
			goto next_vlan;
	}
	/* Take care of promiscuous and all multicast flow rules. */
	if (!broadcast) {
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			if (priv->dev->data->promiscuous) {
				if (flow->promisc)
					break;
			} else {
				assert(priv->dev->data->all_multicast);
				if (flow->allmulti)
					break;
			}
		}
		if (flow && flow->internal) {
			assert(flow->rss);
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, rss_conf->queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				flow = NULL;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			if (priv->dev->data->promiscuous) {
				pattern[1].spec = NULL;
				pattern[1].mask = NULL;
			} else {
				assert(priv->dev->data->all_multicast);
				pattern[1].spec = &eth_allmulti;
				pattern[1].mask = &eth_allmulti;
			}
			pattern[2] = pattern[3];
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		assert(flow->promisc || flow->allmulti);
		flow->select = 1;
	}
error:
	/* Clear selection and clean up stale internal flow rules. */
	flow = LIST_FIRST(&priv->flows);
	while (flow && flow->internal) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->select)
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
		else
			flow->select = 0;
		flow = next;
	}
	return err;
}

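/*
 * On return from mlx4_flow_internal(), exactly the internal flow rules
 * required by the current mode remain: each pass marks the rules it still
 * needs through flow->select, and the error/cleanup block above destroys
 * whichever internal rules were left unselected (e.g. after a MAC address
 * or VLAN filter change).
 */
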
/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int ret;

	/* Internal flow rules are guaranteed to come first in the list. */
	if (priv->isolated) {
		/*
		 * Get rid of them in isolated mode, stop at the first
		 * non-internal rule found.
		 */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_FIRST(&priv->flows))
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
	} else {
		/* Refresh internal rules. */
		ret = mlx4_flow_internal(priv, error);
		if (ret)
			return ret;
	}
	/* Toggle the remaining flow rules. */
	LIST_FOREACH(flow, &priv->flows, next) {
		ret = mlx4_flow_toggle(priv, flow, priv->started, error);
		if (ret)
			return ret;
	}
	if (!priv->started)
		assert(!priv->drop);
	return 0;
}

/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct priv *priv)
{
	struct rte_flow *flow;

	while ((flow = LIST_FIRST(&priv->flows)))
		mlx4_flow_destroy(priv->dev, flow, NULL);
	assert(LIST_EMPTY(&priv->rss));
}

static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}

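/*
 * This hook is what connects the PMD to the generic flow API: when an
 * application calls rte_flow_validate(), rte_flow_create() and friends,
 * ethdev issues RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET through the
 * device's filter_ctrl callback to retrieve &mlx4_flow_ops, then invokes
 * the matching function from that structure.
 */
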