/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
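/*
 * Illustration (not compiled): NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
 * RTE_FLOW_ITEM_TYPE_IPV4) expands to a compound literal equivalent to:
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * i.e. an anonymous END-terminated array suitable for the next_item field
 * of struct mlx4_flow_proc_item below.
 */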
/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
	/** Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/** Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/** Size in bytes for @p mask_support and @p mask_default. */
	const unsigned int mask_sz;
	/** Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};
/** Shared resources for drop flow rules. */
struct mlx4_drop {
	struct ibv_qp *qp; /**< QP target. */
	struct ibv_cq *cq; /**< CQ associated with above QP. */
	struct priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count. */
};
/**
 * Convert DPDK RSS hash fields to their Verbs equivalent.
 *
 * This function returns the supported (default) set when @p rss_hf has
 * special value (uint64_t)-1.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_hf
 *   Hash fields in DPDK format (see struct rte_eth_rss_conf).
 *
 * @return
 *   A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
 *   otherwise and rte_errno is set.
 */
uint64_t
mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
{
	enum { IPV4, IPV6, TCP, UDP, };
	const uint64_t in[] = {
		[IPV4] = (ETH_RSS_IPV4 |
			  ETH_RSS_FRAG_IPV4 |
			  ETH_RSS_NONFRAG_IPV4_TCP |
			  ETH_RSS_NONFRAG_IPV4_UDP |
			  ETH_RSS_NONFRAG_IPV4_OTHER),
		[IPV6] = (ETH_RSS_IPV6 |
			  ETH_RSS_FRAG_IPV6 |
			  ETH_RSS_NONFRAG_IPV6_TCP |
			  ETH_RSS_NONFRAG_IPV6_UDP |
			  ETH_RSS_NONFRAG_IPV6_OTHER |
			  ETH_RSS_IPV6_EX |
			  ETH_RSS_IPV6_TCP_EX |
			  ETH_RSS_IPV6_UDP_EX),
		[TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
			 ETH_RSS_NONFRAG_IPV6_TCP |
			 ETH_RSS_IPV6_TCP_EX),
		[UDP] = (ETH_RSS_NONFRAG_IPV4_UDP |
			 ETH_RSS_NONFRAG_IPV6_UDP |
			 ETH_RSS_IPV6_UDP_EX),
	};
	const uint64_t out[RTE_DIM(in)] = {
		[IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
		[IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
		[TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
		[UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
	};
	uint64_t seen = 0;
	uint64_t conv = 0;
	unsigned int i;

	for (i = 0; i != RTE_DIM(in); ++i)
		if (rss_hf & in[i]) {
			seen |= rss_hf & in[i];
			conv |= out[i];
		}
	if ((conv & priv->hw_rss_sup) == conv) {
		if (rss_hf == (uint64_t)-1) {
			/* Include inner RSS by default if supported. */
			conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER;
		}
		if (!(rss_hf & ~seen))
			return conv;
	}
	rte_errno = ENOTSUP;
	return (uint64_t)-1;
}
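/*
 * Example (illustrative only): with rss_hf ==
 * (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP), both bits fall under in[IPV4]
 * and the UDP bit also under in[UDP], so the result combines out[IPV4] and
 * out[UDP]:
 *
 *   IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *   IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP
 *
 * provided the device (priv->hw_rss_sup) supports that combination;
 * otherwise rte_errno is set to ENOTSUP and (uint64_t)-1 is returned.
 */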
/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;
	unsigned int i;

	if (!mask) {
		flow->promisc = 1;
	} else {
		uint32_t sum_dst = 0;
		uint32_t sum_src = 0;

		for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
			sum_dst += mask->dst.addr_bytes[i];
			sum_src += mask->src.addr_bytes[i];
		}
		if (sum_src) {
			msg = "mlx4 does not support source MAC matching";
			goto error;
		} else if (!sum_dst) {
			flow->promisc = 1;
		} else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
			if (!(spec->dst.addr_bytes[0] & 1)) {
				msg = "mlx4 does not support the explicit"
					" exclusion of all multicast traffic";
				goto error;
			}
			flow->allmulti = 1;
		} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
			msg = "mlx4 does not support matching partial"
				" Ethernet fields";
			goto error;
		}
	}
	if (flow->promisc) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (flow->allmulti) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
		return 0;
	}
	++flow->ibv_attr->num_of_specs;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*eth),
	};
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
	}
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
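/*
 * Example (illustrative only): an ETH item accepted by the allmulti
 * special case above, i.e. spec and mask destination both set to
 * 01:00:00:00:00:00 (multicast bit only):
 *
 *   static const struct rte_flow_item_eth allmulti = {
 *           .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &allmulti,
 *           .mask = &allmulti,
 *   };
 *
 * Any other partial destination mask is rejected with ENOTSUP.
 */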
/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;

	if (!mask || !mask->tci) {
		msg = "mlx4 cannot match all VLAN traffic while excluding"
			" non-VLAN traffic, TCI VID must be specified";
		goto error;
	}
	if (mask->tci != RTE_BE16(0x0fff)) {
		msg = "mlx4 does not support partial TCI VID matching";
		goto error;
	}
	/* VLAN is merged into the previously allocated ETH spec. */
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
		       sizeof(*eth));
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
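/*
 * Example (illustrative only): matching VLAN ID 42 on top of a prior ETH
 * item. Only a full VID mask (0x0fff) is accepted:
 *
 *   static const struct rte_flow_item_vlan vlan_spec = {
 *           .tci = RTE_BE16(42),
 *   };
 *   static const struct rte_flow_item_vlan vlan_mask = {
 *           .tci = RTE_BE16(0x0fff),
 *   };
 *
 * A mask such as RTE_BE16(0x00ff) would be rejected as partial VID
 * matching.
 */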
/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_ipv4 *ipv4;
	const char *msg;

	if (mask &&
	    ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
	     (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
		msg = "mlx4 does not support matching partial IPv4 fields";
		goto error;
	}
	++flow->ibv_attr->num_of_specs;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = sizeof(*ipv4),
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
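/*
 * Note on the partial-mask test above (illustrative): for a 32-bit mask,
 * (uint32_t)(mask + 1) > UINT32_C(1) holds unless mask is 0x00000000
 * (mask + 1 == 1) or 0xffffffff (mask + 1 wraps to 0). It is therefore a
 * compact way to accept only empty or full masks:
 *
 *   mask = 0x00000000 -> 0x00000001 -> accepted (field ignored)
 *   mask = 0xffffffff -> 0x00000000 -> accepted (exact match)
 *   mask = 0xffffff00 -> 0xffffff01 -> rejected (partial)
 */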
/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *udp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial UDP fields";
		goto error;
	}
	++flow->ibv_attr->num_of_specs;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(*udp),
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *tcp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial TCP fields";
		goto error;
	}
	++flow->ibv_attr->num_of_specs;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(*tcp),
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}
/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const uint8_t *mask;
	unsigned int i;

	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
	/* No spec, no mask, no problem. */
	if (!item->spec)
		return 0;
	mask = item->mask ?
		(const uint8_t *)item->mask :
		(const uint8_t *)proc->mask_default;
	assert(mask);
	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (!mask[i])
			continue;
		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
		    ((const uint8_t *)proc->mask_support)[i])
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported field found in \"mask\"");
		if (item->last &&
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i]))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item,
				 "range between \"spec\" and \"last\""
				 " is larger than \"mask\"");
	}
	return 0;
}
/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_eth){
			/* Only destination MAC can be matched. */
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.mask_default = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.merge = mlx4_flow_merge_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_vlan){
			/* Only TCI VID matching is supported. */
			.tci = RTE_BE16(0x0fff),
		},
		.mask_default = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.merge = mlx4_flow_merge_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = mlx4_flow_merge_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = mlx4_flow_merge_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = mlx4_flow_merge_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};
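/*
 * Illustration (not compiled): the graph above only accepts patterns
 * following ETH -> [VLAN] -> IPV4 -> UDP|TCP, starting from the END
 * entry's next_item. For instance:
 *
 *   ETH / IPV4 / UDP   -> supported
 *   ETH / VLAN / IPV4  -> supported
 *   IPV4 / ETH         -> rejected (only ETH may follow END)
 *   ETH / IPV6         -> rejected (no IPV6 entry in the graph)
 */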
/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct rte_flow **addr)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc;
	struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
	struct rte_flow *flow = &temp;
	const char *msg = NULL;

	if (attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			 NULL, "groups are not supported");
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			 NULL, "maximum priority level is "
			 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
	if (attr->egress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			 NULL, "egress is not supported");
	if (!attr->ingress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			 NULL, "only ingress is supported");
fill:
	proc = mlx4_flow_proc_item_list;
	/* Go over pattern. */
	for (item = pattern; item->type; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;
		unsigned int i;
		int err;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
			flow->internal = 1;
			continue;
		}
		if (flow->promisc || flow->allmulti) {
			msg = "mlx4 does not support additional matching"
				" criteria combined with indiscriminate"
				" matching on Ethernet headers";
			goto exit_item_not_supported;
		}
		for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
				break;
			}
		}
		if (!next)
			goto exit_item_not_supported;
		proc = next;
		/*
		 * Perform basic sanity checks only once, while handle is
		 * not allocated.
		 */
		if (flow == &temp) {
			err = mlx4_flow_item_check(item, proc, error);
			if (err)
				return err;
		} else if (proc->merge) {
			err = proc->merge(flow, item, proc, error);
			if (err)
				return err;
		}
		flow->ibv_attr_size += proc->dst_sz;
	}
	/* Go over actions list. */
	for (action = actions; action->type; ++action) {
		switch (action->type) {
			const struct rte_flow_action_queue *queue;
			const struct rte_flow_action_rss *rss;
			const struct rte_eth_rss_conf *rss_conf;
			unsigned int i;

		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow->drop = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			if (flow->rss)
				break;
			queue = action->conf;
			if (queue->index >= priv->dev->data->nb_rx_queues) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, 0, mlx4_rss_hash_key_default, 1,
				 &queue->index);
			if (!flow->rss) {
				msg = "not enough resources for additional"
					" single-queue RSS context";
				goto exit_action_not_supported;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			if (flow->rss)
				break;
			rss = action->conf;
			/* Default RSS configuration if none is provided. */
			rss_conf =
				rss->rss_conf ?
				rss->rss_conf :
				&(struct rte_eth_rss_conf){
					.rss_key = mlx4_rss_hash_key_default,
					.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
					.rss_hf = (uint64_t)-1,
				};
			/* Sanity checks. */
			for (i = 0; i < rss->num; ++i)
				if (rss->queue[i] >=
				    priv->dev->data->nb_rx_queues)
					break;
			if (i != rss->num) {
				msg = "queue index target beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			if (!rte_is_power_of_2(rss->num)) {
				msg = "for RSS, mlx4 requires the number of"
					" queues to be a power of two";
				goto exit_action_not_supported;
			}
			if (rss_conf->rss_key_len !=
			    sizeof(flow->rss->key)) {
				msg = "mlx4 supports exactly one RSS hash key"
					" length: "
					MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
				goto exit_action_not_supported;
			}
			for (i = 1; i < rss->num; ++i)
				if (rss->queue[i] - rss->queue[i - 1] != 1)
					break;
			if (i != rss->num) {
				msg = "mlx4 requires RSS contexts to use"
					" consecutive queue indices only";
				goto exit_action_not_supported;
			}
			if (rss->queue[0] % rss->num) {
				msg = "mlx4 requires the first queue of a RSS"
					" context to be aligned on a multiple"
					" of the context size";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv,
				 mlx4_conv_rss_hf(priv, rss_conf->rss_hf),
				 rss_conf->rss_key, rss->num, rss->queue);
			if (!flow->rss) {
				msg = "either invalid parameters or not enough"
					" resources for additional multi-queue"
					" RSS context";
				goto exit_action_not_supported;
			}
			break;
		default:
			goto exit_action_not_supported;
		}
	}
	if (!flow->rss && !flow->drop)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "no valid action");
	/* Validation ends here. */
	if (!addr) {
		if (flow->rss)
			mlx4_rss_put(flow->rss);
		return 0;
	}
	if (flow == &temp) {
		/* Allocate proper handle based on collected data. */
		const struct mlx4_malloc_vec vec[] = {
			{
				.align = alignof(struct rte_flow),
				.size = sizeof(*flow),
				.addr = (void **)&flow,
			},
			{
				.align = alignof(struct ibv_flow_attr),
				.size = temp.ibv_attr_size,
				.addr = (void **)&temp.ibv_attr,
			},
		};
		if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
			return rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "flow rule handle allocation failure");
		/* Most fields will be updated by second pass. */
		*flow = (struct rte_flow){
			.ibv_attr = temp.ibv_attr,
			.ibv_attr_size = sizeof(*flow->ibv_attr),
			.rss = temp.rss,
		};
		*flow->ibv_attr = (struct ibv_flow_attr){
			.type = IBV_FLOW_ATTR_NORMAL,
			.size = sizeof(*flow->ibv_attr),
			.priority = attr->priority,
			.port = priv->port,
		};
		goto fill;
	}
	*addr = flow;
	return 0;
exit_item_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg ? msg : "item not supported");
exit_action_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  action, msg ? msg : "action not supported");
}
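/*
 * Example (illustrative only): the RSS constraints checked above mean a
 * valid RSS action must target a power-of-two number of consecutive Rx
 * queues whose first index is aligned on the context size, e.g.:
 *
 *   queues { 4, 5, 6, 7 }  -> accepted (num = 4, first index 4 % 4 == 0)
 *   queues { 1, 2, 3, 4 }  -> rejected (first index not aligned)
 *   queues { 0, 1, 2 }     -> rejected (num = 3 not a power of two)
 *   queues { 0, 2, 4, 6 }  -> rejected (not consecutive)
 */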
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct priv *priv)
{
	struct mlx4_drop *drop = priv->drop;

	if (drop) {
		assert(drop->refcnt);
		assert(drop->priv == priv);
		++drop->refcnt;
		return drop;
	}
	drop = rte_malloc(__func__, sizeof(*drop), 0);
	if (!drop)
		goto error;
	*drop = (struct mlx4_drop){
		.priv = priv,
		.refcnt = 1,
	};
	drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!drop->cq)
		goto error;
	drop->qp = mlx4_glue->create_qp
		(priv->pd,
		 &(struct ibv_qp_init_attr){
			.send_cq = drop->cq,
			.recv_cq = drop->cq,
			.qp_type = IBV_QPT_RAW_PACKET,
		 });
	if (!drop->qp)
		goto error;
	priv->drop = drop;
	return drop;
error:
	/* Reached on allocation or Verbs failure; drop may be NULL. */
	if (drop) {
		if (drop->qp)
			claim_zero(mlx4_glue->destroy_qp(drop->qp));
		if (drop->cq)
			claim_zero(mlx4_glue->destroy_cq(drop->cq));
		rte_free(drop);
	}
	rte_errno = ENOMEM;
	return NULL;
}
/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
	assert(drop->refcnt);
	if (--drop->refcnt)
		return;
	drop->priv->drop = NULL;
	claim_zero(mlx4_glue->destroy_qp(drop->qp));
	claim_zero(mlx4_glue->destroy_cq(drop->cq));
	rte_free(drop);
}
/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct priv *priv,
		 struct rte_flow *flow,
		 int enable,
		 struct rte_flow_error *error)
{
	struct ibv_qp *qp = NULL;
	const char *msg;
	int err;

	if (!enable) {
		if (!flow->ibv_flow)
			return 0;
		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		if (flow->drop)
			mlx4_drop_put(priv->drop);
		else if (flow->rss)
			mlx4_rss_detach(flow->rss);
		return 0;
	}
	assert(flow->ibv_attr);
	if (!flow->internal &&
	    !priv->isolated &&
	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
		if (flow->ibv_flow) {
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else if (flow->rss)
				mlx4_rss_detach(flow->rss);
		}
		err = EACCES;
		msg = ("priority level "
		       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
		       " is reserved when not in isolated mode");
		goto error;
	}
	if (flow->rss) {
		struct mlx4_rss *rss = flow->rss;
		int missing = 0;
		unsigned int i;

		/* Stop at the first nonexistent target queue. */
		for (i = 0; i != rss->queues; ++i)
			if (rss->queue_id[i] >=
			    priv->dev->data->nb_rx_queues ||
			    !priv->dev->data->rx_queues[rss->queue_id[i]]) {
				missing = 1;
				break;
			}
		if (flow->ibv_flow) {
			if (missing ^ !flow->drop)
				return 0;
			/* Verbs flow needs updating. */
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else
				mlx4_rss_detach(rss);
		}
		if (!missing) {
			err = mlx4_rss_attach(rss);
			if (err) {
				err = -err;
				msg = "cannot create indirection table or hash"
					" QP to associate flow rule with";
				goto error;
			}
			qp = rss->qp;
		}
		/* A missing target queue drops traffic implicitly. */
		flow->drop = missing;
	}
	if (flow->drop) {
		mlx4_drop_get(priv);
		if (!priv->drop) {
			err = rte_errno;
			msg = "resources for drop flow rule cannot be created";
			goto error;
		}
		qp = priv->drop->qp;
	}
	assert(qp);
	if (flow->ibv_flow)
		return 0;
	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
	if (flow->ibv_flow)
		return 0;
	if (flow->drop)
		mlx4_drop_put(priv->drop);
	else if (flow->rss)
		mlx4_rss_detach(flow->rss);
	err = errno;
	msg = "flow rule rejected by device";
error:
	return rte_flow_error_set
		(error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
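/*
 * Behavioral note (illustrative): toggling a rule whose RSS context
 * references an Rx queue that has been released does not fail; per the
 * "missing" logic above, the rule is silently re-created as a drop rule
 * (flow->drop = missing) until a later mlx4_flow_sync() finds the queue
 * configured again.
 */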
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int err;

	err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
	if (err)
		return NULL;
	err = mlx4_flow_toggle(priv, flow, priv->started, error);
	if (!err) {
		struct rte_flow *curr = LIST_FIRST(&priv->flows);

		/* New rules are inserted after internal ones. */
		if (!curr || !curr->internal) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			while (LIST_NEXT(curr, next) &&
			       LIST_NEXT(curr, next)->internal)
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
		return flow;
	}
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   error->message);
	rte_free(flow);
	return NULL;
}
/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
		return 0;
	priv->isolated = !!enable;
	if (mlx4_flow_sync(priv, error)) {
		priv->isolated = !enable;
		return -rte_errno;
	}
	return 0;
}
/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int err = mlx4_flow_toggle(priv, flow, 0, error);

	if (err)
		return err;
	LIST_REMOVE(flow, next);
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_free(flow);
	return 0;
}
/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow = LIST_FIRST(&priv->flows);

	while (flow) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->internal)
			mlx4_flow_destroy(dev, flow, error);
		flow = next;
	}
	return 0;
}
/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
{
	while (vlan < 4096) {
		if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
		    (UINT64_C(1) << (vlan % 64)))
			return vlan;
		++vlan;
	}
	return vlan;
}
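/*
 * Example (illustrative only): with VLAN filters 5 and 100 configured,
 * successive calls walk the 64-bit words of
 * dev->data->vlan_filter_conf.ids[]:
 *
 *   mlx4_flow_internal_next_vlan(priv, 0)   -> 5
 *   mlx4_flow_internal_next_vlan(priv, 6)   -> 100
 *   mlx4_flow_internal_next_vlan(priv, 101) -> 4096 (none left)
 */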
/**
 * Generate internal flow rules.
 *
 * Various flow rules are created depending on the mode the device is in:
 *
 * 1. Promiscuous:
 *       port MAC + broadcast + catch-all (VLAN filtering is ignored).
 * 2. All multicast:
 *       port MAC/VLAN + broadcast + catch-all multicast.
 * 3. Otherwise:
 *       port MAC/VLAN + broadcast MAC/VLAN.
 *
 * About MAC flow rules:
 *
 * - MAC flow rules are generated from @p dev->data->mac_addrs
 *   (@p priv->mac array).
 * - An additional flow rule for Ethernet broadcasts is also generated.
 * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
 *   is enabled and VLAN filters are configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = {
		.priority = MLX4_FLOW_PRIORITY_LAST,
		.ingress = 1,
	};
	struct rte_flow_item_eth eth_spec;
	const struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const struct rte_flow_item_eth eth_allmulti = {
		.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_vlan vlan_spec;
	const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0x0fff),
	};
	struct rte_flow_item pattern[] = {
		{
			.type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = &eth_spec,
			.mask = &eth_mask,
		},
		{
			/* Replaced with VLAN if filtering is enabled. */
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	/*
	 * Round number of queues down to their previous power of 2 to
	 * comply with RSS context limitations. Extra queues silently do not
	 * get RSS by default.
	 */
	uint32_t queues =
		rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
	alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
		[offsetof(struct rte_flow_action_rss, queue) +
		 sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
	struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = rss_conf,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct ether_addr *rule_mac = &eth_spec.dst;
	rte_be16_t *rule_vlan =
		(priv->dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
		!priv->dev->data->promiscuous ?
		&vlan_spec.tci :
		NULL;
	uint16_t vlan = 0;
	struct rte_flow *flow;
	unsigned int i;
	int err = 0;

	/* Nothing to be done if there are no Rx queues. */
	if (!queues)
		goto error;
	/* Prepare default RSS configuration. */
	*rss_conf = (struct rte_flow_action_rss){
		.rss_conf = NULL, /* Rely on default fallback settings. */
		.num = queues,
	};
	for (i = 0; i != queues; ++i)
		rss_conf->queue[i] = i;
	/*
	 * Set up VLAN item if filtering is enabled and at least one VLAN
	 * filter is configured.
	 */
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, 0);
		if (vlan < 4096) {
			pattern[2] = (struct rte_flow_item){
				.type = RTE_FLOW_ITEM_TYPE_VLAN,
				.spec = &vlan_spec,
				.mask = &vlan_mask,
			};
next_vlan:
			*rule_vlan = rte_cpu_to_be_16(vlan);
		} else {
			rule_vlan = NULL;
		}
	}
	for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
		const struct ether_addr *mac;

		/* Broadcasts are handled by an extra iteration. */
		if (i < RTE_DIM(priv->mac))
			mac = &priv->mac[i];
		else
			mac = &eth_mask.dst;
		if (is_zero_ether_addr(mac))
			continue;
		/* Check if MAC flow rule is already present. */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			const struct ibv_flow_spec_eth *eth =
				(const void *)((uintptr_t)flow->ibv_attr +
					       sizeof(*flow->ibv_attr));
			unsigned int j;

			if (!flow->mac)
				continue;
			assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
			assert(flow->ibv_attr->num_of_specs == 1);
			assert(eth->type == IBV_FLOW_SPEC_ETH);
			assert(flow->rss);
			if (rule_vlan &&
			    (eth->val.vlan_tag != *rule_vlan ||
			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
				continue;
			if (!rule_vlan && eth->mask.vlan_tag)
				continue;
			for (j = 0; j != sizeof(mac->addr_bytes); ++j)
				if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
				    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
				    eth->val.src_mac[j] != UINT8_C(0x00) ||
				    eth->mask.src_mac[j] != UINT8_C(0x00))
					break;
			if (j != sizeof(mac->addr_bytes))
				continue;
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, rss_conf->queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				continue;
			break;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			memcpy(rule_mac, mac, sizeof(*mac));
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		flow->select = 1;
		flow->mac = 1;
	}
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
		if (vlan < 4096)
			goto next_vlan;
	}
	/* Take care of promiscuous and all multicast flow rules. */
	if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			if (priv->dev->data->promiscuous) {
				if (flow->promisc)
					break;
			} else {
				assert(priv->dev->data->all_multicast);
				if (flow->allmulti)
					break;
			}
		}
		if (flow && flow->internal) {
			assert(flow->rss);
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, rss_conf->queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				flow = NULL;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			if (priv->dev->data->promiscuous) {
				pattern[1].spec = NULL;
				pattern[1].mask = NULL;
			} else {
				assert(priv->dev->data->all_multicast);
				pattern[1].spec = &eth_allmulti;
				pattern[1].mask = &eth_allmulti;
			}
			pattern[2] = pattern[3];
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		assert(flow->promisc || flow->allmulti);
		flow->select = 1;
	}
error:
	/* Clear selection and clean up stale internal flow rules. */
	flow = LIST_FIRST(&priv->flows);
	while (flow && flow->internal) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->select)
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
		else
			flow->select = 0;
		flow = next;
	}
	return err;
}
/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int ret;

	/* Internal flow rules are guaranteed to come first in the list. */
	if (priv->isolated) {
		/*
		 * Get rid of them in isolated mode, stop at the first
		 * non-internal rule found.
		 */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_FIRST(&priv->flows))
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
	} else {
		/* Refresh internal rules. */
		ret = mlx4_flow_internal(priv, error);
		if (ret)
			return ret;
	}
	/* Toggle the remaining flow rules. */
	LIST_FOREACH(flow, &priv->flows, next) {
		ret = mlx4_flow_toggle(priv, flow, priv->started, error);
		if (ret)
			return ret;
	}
	if (!priv->started)
		assert(!priv->drop);
	return 0;
}
/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct priv *priv)
{
	struct rte_flow *flow;

	while ((flow = LIST_FIRST(&priv->flows)))
		mlx4_flow_destroy(priv->dev, flow, NULL);
	assert(LIST_EMPTY(&priv->rss));
}
static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}
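/*
 * Example (illustrative only): applications do not call mlx4_filter_ctrl()
 * directly; the rte_flow layer retrieves the operations table through the
 * generic filter interface, roughly as follows:
 *
 *   const struct rte_flow_ops *ops;
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops);
 *   // ops now points to mlx4_flow_ops for mlx4 ports.
 */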