/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
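
/*
 * For instance, NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to the anonymous array:
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * so callers can iterate until RTE_FLOW_ITEM_TYPE_END (value 0) to walk all
 * allowed successor item types.
 */
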
/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
	/** Bit-mask for fields supported by this PMD. */
	const void *mask_support;
	/** Bit-mask to use when @p item->mask is not provided. */
	const void *mask_default;
	/** Size in bytes for @p mask_support and @p mask_default. */
	const unsigned int mask_sz;
	/** Merge a pattern item into a flow rule handle. */
	int (*merge)(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};

/** Shared resources for drop flow rules. */
struct mlx4_drop {
	struct ibv_qp *qp; /**< QP target. */
	struct ibv_cq *cq; /**< CQ associated with above QP. */
	struct priv *priv; /**< Back pointer to private data. */
	uint32_t refcnt; /**< Reference count. */
};

/**
 * Convert supported RSS hash field types between DPDK and Verbs formats.
 *
 * This function returns the supported (default) set when @p types has
 * special value 0.
 *
 * @param priv
 *   Pointer to private structure.
 * @param types
 *   Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct
 *   rte_eth_rss_conf) or Verbs format.
 * @param verbs_to_dpdk
 *   A zero value converts @p types from DPDK to Verbs, a nonzero value
 *   performs the reverse operation.
 *
 * @return
 *   Converted RSS hash fields on success, (uint64_t)-1 otherwise and
 *   rte_errno is set.
 */
uint64_t
mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk)
{
	enum {
		INNER,
		IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3,
		TCP, UDP,
		IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1,
	};
	enum {
		VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
		VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
		VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
		VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
	};
	static const uint64_t dpdk[] = {
		[INNER] = 0,
		[IPV4] = ETH_RSS_IPV4,
		[IPV4_1] = ETH_RSS_FRAG_IPV4,
		[IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
		[IPV6] = ETH_RSS_IPV6,
		[IPV6_1] = ETH_RSS_FRAG_IPV6,
		[IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
		[IPV6_3] = ETH_RSS_IPV6_EX,
		[TCP] = 0,
		[UDP] = 0,
		[IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
		[IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
		[IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
		[IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
		[IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
		[IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
	};
	static const uint64_t verbs[RTE_DIM(dpdk)] = {
		[INNER] = IBV_RX_HASH_INNER,
		[IPV4] = VERBS_IPV4,
		[IPV4_1] = VERBS_IPV4,
		[IPV4_2] = VERBS_IPV4,
		[IPV6] = VERBS_IPV6,
		[IPV6_1] = VERBS_IPV6,
		[IPV6_2] = VERBS_IPV6,
		[IPV6_3] = VERBS_IPV6,
		[TCP] = VERBS_TCP,
		[UDP] = VERBS_UDP,
		[IPV4_TCP] = VERBS_IPV4 | VERBS_TCP,
		[IPV4_UDP] = VERBS_IPV4 | VERBS_UDP,
		[IPV6_TCP] = VERBS_IPV6 | VERBS_TCP,
		[IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP,
		[IPV6_UDP] = VERBS_IPV6 | VERBS_UDP,
		[IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP,
	};
	const uint64_t *in = verbs_to_dpdk ? verbs : dpdk;
	const uint64_t *out = verbs_to_dpdk ? dpdk : verbs;
	uint64_t seen = 0;
	uint64_t conv = 0;
	unsigned int i;

	if (!types) {
		if (!verbs_to_dpdk)
			return priv->hw_rss_sup;
		types = priv->hw_rss_sup;
	}
	for (i = 0; i != RTE_DIM(dpdk); ++i)
		if (in[i] && (types & in[i]) == in[i]) {
			seen |= types & in[i];
			conv |= out[i];
		}
	if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) &&
	    !(types & ~seen))
		return conv;
	rte_errno = ENOTSUP;
	return (uint64_t)-1;
}
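
/*
 * Example (DPDK to Verbs direction): ETH_RSS_NONFRAG_IPV4_TCP selects the
 * IPV4_TCP entry above and therefore converts to VERBS_IPV4 | VERBS_TCP,
 * i.e. source/destination addresses and ports all contribute to the hash.
 * Requesting a type the device cannot hash on (outside priv->hw_rss_sup)
 * makes the function fail with ENOTSUP instead.
 */
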
/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;
	unsigned int i;

	if (!mask) {
		flow->promisc = 1;
	} else {
		uint32_t sum_dst = 0;
		uint32_t sum_src = 0;

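		/*
		 * Summing mask bytes is a compact way to classify the mask:
		 * zero means "match anything" (promiscuous-like), 0xff * 6
		 * means a full unicast match, and a lone 0x01 in the first
		 * byte selects the multicast group bit only.
		 */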
		for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
			sum_dst += mask->dst.addr_bytes[i];
			sum_src += mask->src.addr_bytes[i];
		}
		if (sum_src) {
			msg = "mlx4 does not support source MAC matching";
			goto error;
		} else if (!sum_dst) {
			flow->promisc = 1;
		} else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
			if (!(spec->dst.addr_bytes[0] & 1)) {
				msg = "mlx4 does not support the explicit"
					" exclusion of all multicast traffic";
				goto error;
			}
			flow->allmulti = 1;
		} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
			msg = "mlx4 does not support matching partial"
				" Ethernet fields";
			goto error;
		}
	}
	if (!flow->ibv_attr)
		return 0;
	if (flow->promisc) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (flow->allmulti) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
		return 0;
	}
	++flow->ibv_attr->num_of_specs;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*eth),
	};
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
	}
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_eth *eth;
	const char *msg;

	if (!mask || !mask->tci) {
		msg = "mlx4 cannot match all VLAN traffic while excluding"
			" non-VLAN traffic, TCI VID must be specified";
		goto error;
	}
	if (mask->tci != RTE_BE16(0x0fff)) {
		msg = "mlx4 does not support partial TCI VID matching";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
		       sizeof(*eth));
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
		     const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_ipv4 *ipv4;
	const char *msg;

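	/*
	 * The unsigned wraparound below accepts only all-zeros or all-ones
	 * masks: for a 32-bit field m, (uint32_t)(m + 1) > 1 holds exactly
	 * when m is neither 0x00000000 nor 0xffffffff.
	 */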
	if (mask &&
	    ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
	     (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
		msg = "mlx4 does not support matching partial IPv4 fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = sizeof(*ipv4),
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 * - Due to HW/FW limitation, flow rule priority is not taken into account
 *   when matching UDP destination ports, doing so is therefore only
 *   supported at the highest priority level (0).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *udp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial UDP fields";
		goto error;
	}
	if (mask && mask->hdr.dst_port && flow->priority) {
		msg = "combining UDP destination port matching with a nonzero"
			" priority level is not supported";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(*udp),
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
		    const struct rte_flow_item *item,
		    const struct mlx4_flow_proc_item *proc,
		    struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask =
		spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
	struct ibv_flow_spec_tcp_udp *tcp;
	const char *msg;

	if (mask &&
	    ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
	     (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
		msg = "mlx4 does not support matching partial TCP fields";
		goto error;
	}
	if (!flow->ibv_attr)
		return 0;
	++flow->ibv_attr->num_of_specs;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(*tcp),
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
error:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg);
}

/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
		     const struct mlx4_flow_proc_item *proc,
		     struct rte_flow_error *error)
{
	const uint8_t *mask;
	unsigned int i;

	/* item->last and item->mask cannot exist without item->spec. */
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "\"mask\" or \"last\" field provided without a"
			 " corresponding \"spec\"");
	/* No spec, no mask, no problem. */
	if (!item->spec)
		return 0;
	mask = item->mask ?
		(const uint8_t *)item->mask :
		(const uint8_t *)proc->mask_default;
	assert(mask);
	/*
	 * Single-pass check to make sure that:
	 * - Mask is supported, no bits are set outside proc->mask_support.
	 * - Both item->spec and item->last are included in mask.
	 */
	for (i = 0; i != proc->mask_sz; ++i) {
		if (!mask[i])
			continue;
		if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
		    ((const uint8_t *)proc->mask_support)[i])
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item, "unsupported field found in \"mask\"");
		if (item->last &&
		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
		    (((const uint8_t *)item->last)[i] & mask[i]))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				 item,
				 "range between \"spec\" and \"last\""
				 " is larger than \"mask\"");
	}
	return 0;
}
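
/*
 * Note the "last" check above effectively rejects true ranges: a masked
 * "last" must equal the masked "spec". For example, spec 10.0.0.0 with
 * last 10.0.0.255 only passes when the mask also hides the last octet
 * (and mlx4_flow_merge_ipv4() would then still reject such a partial
 * mask).
 */
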
/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_eth){
			/* Only destination MAC can be matched. */
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.mask_default = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.merge = mlx4_flow_merge_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask_support = &(const struct rte_flow_item_vlan){
			/* Only TCI VID matching is supported. */
			.tci = RTE_BE16(0x0fff),
		},
		.mask_default = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.merge = mlx4_flow_merge_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask_support = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
			},
		},
		.mask_default = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.merge = mlx4_flow_merge_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask_support = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.merge = mlx4_flow_merge_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask_support = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = RTE_BE16(0xffff),
				.dst_port = RTE_BE16(0xffff),
			},
		},
		.mask_default = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.merge = mlx4_flow_merge_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};
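
/*
 * As an illustration, the graph above accepts patterns such as the
 * following testpmd command (hypothetical port/queue numbers):
 *
 *   flow create 0 ingress
 *        pattern eth dst is 00:11:22:33:44:55 / vlan vid is 42 /
 *                ipv4 dst is 192.0.2.1 / udp dst is 4242 / end
 *        actions queue index 3 / end
 *
 * ETH may be followed by VLAN and/or IPV4, IPV4 by UDP or TCP; any other
 * ordering is rejected by mlx4_flow_prepare().
 */
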
/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct rte_flow **addr)
{
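	/*
	 * This function runs its main body twice: a first pass with flow
	 * pointing at the stack-allocated "temp" validates the rule and
	 * accumulates the required ibv_attr size, then "goto fill" repeats
	 * it against a properly sized handle to actually populate the Verbs
	 * specifications.
	 */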
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc;
	struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
	struct rte_flow *flow = &temp;
	const char *msg = NULL;
	int overlap;

	if (attr->group)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			 NULL, "groups are not supported");
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			 NULL, "maximum priority level is "
			 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
	if (attr->egress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			 NULL, "egress is not supported");
	if (attr->transfer)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			 NULL, "transfer is not supported");
	if (!attr->ingress)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			 NULL, "only ingress is supported");
fill:
	overlap = 0;
	proc = mlx4_flow_proc_item_list;
	flow->priority = attr->priority;
	/* Go over pattern. */
	for (item = pattern; item->type; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;
		unsigned int i;
		int err;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
			flow->internal = 1;
			continue;
		}
		if (flow->promisc || flow->allmulti) {
			msg = "mlx4 does not support additional matching"
				" criteria combined with indiscriminate"
				" matching on Ethernet headers";
			goto exit_item_not_supported;
		}
		for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
				break;
			}
		}
		if (!next)
			goto exit_item_not_supported;
		proc = next;
		/*
		 * Perform basic sanity checks only once, while handle is
		 * not allocated.
		 */
		if (flow == &temp) {
			err = mlx4_flow_item_check(item, proc, error);
			if (err)
				return err;
		}
		if (proc->merge) {
			err = proc->merge(flow, item, proc, error);
			if (err)
				return err;
		}
		flow->ibv_attr_size += proc->dst_sz;
	}
	/* Go over actions list. */
	for (action = actions; action->type; ++action) {
		/* This one may appear anywhere multiple times. */
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		/* Fate-deciding actions may appear exactly once. */
		if (overlap) {
			msg = "cannot combine several fate-deciding actions,"
				" choose between DROP, QUEUE or RSS";
			goto exit_action_not_supported;
		}
		overlap = 1;
		switch (action->type) {
			const struct rte_flow_action_queue *queue;
			const struct rte_flow_action_rss *rss;
			const uint8_t *rss_key;
			uint32_t rss_key_len;
			uint64_t fields;
			unsigned int i;

		case RTE_FLOW_ACTION_TYPE_DROP:
			flow->drop = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			if (flow->rss)
				break;
			queue = action->conf;
			if (queue->index >= priv->dev->data->nb_rx_queues) {
				msg = "queue target index beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, 0, mlx4_rss_hash_key_default, 1,
				 &queue->index);
			if (!flow->rss) {
				msg = "not enough resources for additional"
					" single-queue RSS context";
				goto exit_action_not_supported;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			if (flow->rss)
				break;
			rss = action->conf;
			/* Default RSS configuration if none is provided. */
			if (rss->key_len) {
				rss_key = rss->key;
				rss_key_len = rss->key_len;
			} else {
				rss_key = mlx4_rss_hash_key_default;
				rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
			}
			/* Sanity checks. */
			for (i = 0; i < rss->queue_num; ++i)
				if (rss->queue[i] >=
				    priv->dev->data->nb_rx_queues)
					break;
			if (i != rss->queue_num) {
				msg = "queue index target beyond number of"
					" configured Rx queues";
				goto exit_action_not_supported;
			}
			if (!rte_is_power_of_2(rss->queue_num)) {
				msg = "for RSS, mlx4 requires the number of"
					" queues to be a power of two";
				goto exit_action_not_supported;
			}
			if (rss_key_len != sizeof(flow->rss->key)) {
				msg = "mlx4 supports exactly one RSS hash key"
					" length: "
					MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
				goto exit_action_not_supported;
			}
			for (i = 1; i < rss->queue_num; ++i)
				if (rss->queue[i] - rss->queue[i - 1] != 1)
					break;
			if (i != rss->queue_num) {
				msg = "mlx4 requires RSS contexts to use"
					" consecutive queue indices only";
				goto exit_action_not_supported;
			}
			if (rss->queue[0] % rss->queue_num) {
				msg = "mlx4 requires the first queue of a RSS"
					" context to be aligned on a multiple"
					" of the context size";
				goto exit_action_not_supported;
			}
			if (rss->func &&
			    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
				msg = "the only supported RSS hash function"
					" is Toeplitz";
				goto exit_action_not_supported;
			}
			if (rss->level) {
				msg = "a nonzero RSS encapsulation level is"
					" not supported";
				goto exit_action_not_supported;
			}
			rte_errno = 0;
			fields = mlx4_conv_rss_types(priv, rss->types, 0);
			if (fields == (uint64_t)-1 && rte_errno) {
				msg = "unsupported RSS hash type requested";
				goto exit_action_not_supported;
			}
			flow->rss = mlx4_rss_get
				(priv, fields, rss_key, rss->queue_num,
				 rss->queue);
			if (!flow->rss) {
				msg = "either invalid parameters or not enough"
					" resources for additional multi-queue"
					" RSS context";
				goto exit_action_not_supported;
			}
			break;
		default:
			goto exit_action_not_supported;
		}
	}
	/* When fate is unknown, drop traffic. */
	if (!overlap)
		flow->drop = 1;
	/* Validation ends here. */
	if (!addr) {
		if (flow->rss)
			mlx4_rss_put(flow->rss);
		return 0;
	}
	if (flow == &temp) {
		/* Allocate proper handle based on collected data. */
		const struct mlx4_malloc_vec vec[] = {
			{
				.align = alignof(struct rte_flow),
				.size = sizeof(*flow),
				.addr = (void **)&flow,
			},
			{
				.align = alignof(struct ibv_flow_attr),
				.size = temp.ibv_attr_size,
				.addr = (void **)&temp.ibv_attr,
			},
		};

		if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
			if (temp.rss)
				mlx4_rss_put(temp.rss);
			return rte_flow_error_set
				(error, rte_errno,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "flow rule handle allocation failure");
		}
		/* Most fields will be updated by second pass. */
		*flow = (struct rte_flow){
			.ibv_attr = temp.ibv_attr,
			.ibv_attr_size = sizeof(*flow->ibv_attr),
			.rss = temp.rss,
		};
		*flow->ibv_attr = (struct ibv_flow_attr){
			.type = IBV_FLOW_ATTR_NORMAL,
			.size = sizeof(*flow->ibv_attr),
			.priority = attr->priority,
		};
		goto fill;
	}
	*addr = flow;
	return 0;
exit_item_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  item, msg ? msg : "item not supported");
exit_action_not_supported:
	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				  action, msg ? msg : "action not supported");
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}

/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct priv *priv)
{
	struct mlx4_drop *drop = priv->drop;

	if (drop) {
		assert(drop->refcnt);
		assert(drop->priv == priv);
		++drop->refcnt;
		return drop;
	}
	drop = rte_malloc(__func__, sizeof(*drop), 0);
	if (!drop)
		goto error;
	*drop = (struct mlx4_drop){
		.priv = priv,
		.refcnt = 1,
	};
	drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!drop->cq)
		goto error;
	drop->qp = mlx4_glue->create_qp
		(priv->pd,
		 &(struct ibv_qp_init_attr){
			.send_cq = drop->cq,
			.recv_cq = drop->cq,
			.qp_type = IBV_QPT_RAW_PACKET,
		 });
	if (!drop->qp)
		goto error;
	priv->drop = drop;
	return drop;
error:
	if (drop) {
		if (drop->qp)
			claim_zero(mlx4_glue->destroy_qp(drop->qp));
		if (drop->cq)
			claim_zero(mlx4_glue->destroy_cq(drop->cq));
		rte_free(drop);
	}
	rte_errno = ENOMEM;
	return NULL;
}
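
/*
 * Note: the drop QP above is left in the RESET state and never has receive
 * buffers posted, so traffic steered to it is simply discarded by the
 * device; this is presumably what makes it a cheap drop target shared by
 * all drop flow rules.
 */
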
/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
	assert(drop->refcnt);
	if (--drop->refcnt)
		return;
	drop->priv->drop = NULL;
	claim_zero(mlx4_glue->destroy_qp(drop->qp));
	claim_zero(mlx4_glue->destroy_cq(drop->cq));
	rte_free(drop);
}

/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct priv *priv,
		 struct rte_flow *flow,
		 int enable,
		 struct rte_flow_error *error)
{
	struct ibv_qp *qp = NULL;
	const char *msg;
	int err;

	if (!enable) {
		if (!flow->ibv_flow)
			return 0;
		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		if (flow->drop)
			mlx4_drop_put(priv->drop);
		else if (flow->rss)
			mlx4_rss_detach(flow->rss);
		return 0;
	}
	assert(flow->ibv_attr);
	if (!flow->internal &&
	    !priv->isolated &&
	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
		if (flow->ibv_flow) {
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else if (flow->rss)
				mlx4_rss_detach(flow->rss);
		}
		err = EACCES;
		msg = ("priority level "
		       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
		       " is reserved when not in isolated mode");
		goto error;
	}
	if (flow->rss) {
		struct mlx4_rss *rss = flow->rss;
		int missing = 0;
		unsigned int i;

		/* Stop at the first nonexistent target queue. */
		for (i = 0; i != rss->queues; ++i)
			if (rss->queue_id[i] >=
			    priv->dev->data->nb_rx_queues ||
			    !priv->dev->data->rx_queues[rss->queue_id[i]]) {
				missing = 1;
				break;
			}
		if (flow->ibv_flow) {
			if (missing ^ !flow->drop)
				return 0;
			/* Verbs flow needs updating. */
			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
			flow->ibv_flow = NULL;
			if (flow->drop)
				mlx4_drop_put(priv->drop);
			else
				mlx4_rss_detach(rss);
		}
		if (!missing) {
			err = mlx4_rss_attach(rss);
			if (err) {
				err = -err;
				msg = "cannot create indirection table or hash"
					" QP to associate flow rule with";
				goto error;
			}
			qp = rss->qp;
		}
		/* A missing target queue drops traffic implicitly. */
		flow->drop = missing;
	}
	if (flow->drop) {
		if (flow->ibv_flow)
			return 0;
		mlx4_drop_get(priv);
		if (!priv->drop) {
			err = rte_errno;
			msg = "resources for drop flow rule cannot be created";
			goto error;
		}
		qp = priv->drop->qp;
	}
	assert(qp);
	if (flow->ibv_flow)
		return 0;
	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
	if (flow->ibv_flow)
		return 0;
	if (flow->drop)
		mlx4_drop_put(priv->drop);
	else if (flow->rss)
		mlx4_rss_detach(flow->rss);
	err = errno;
	msg = "flow rule rejected by device";
error:
	return rte_flow_error_set
		(error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
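
/*
 * Note that mlx4_flow_toggle() is deliberately idempotent: enabling a rule
 * whose Verbs flow already exists and is still consistent returns 0 without
 * touching the device, which is presumably what allows mlx4_flow_sync()
 * below to be called repeatedly on start/stop transitions.
 */
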
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int err;

	err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
	if (err)
		return NULL;
	err = mlx4_flow_toggle(priv, flow, priv->started, error);
	if (!err) {
		struct rte_flow *curr = LIST_FIRST(&priv->flows);

		/* New rules are inserted after internal ones. */
		if (!curr || !curr->internal) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			while (LIST_NEXT(curr, next) &&
			       LIST_NEXT(curr, next)->internal)
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
		return flow;
	}
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   error->message);
	rte_free(flow);
	return NULL;
}

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
		return 0;
	priv->isolated = !!enable;
	if (mlx4_flow_sync(priv, error)) {
		priv->isolated = !enable;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int err = mlx4_flow_toggle(priv, flow, 0, error);

	if (err)
		return err;
	LIST_REMOVE(flow, next);
	if (flow->rss)
		mlx4_rss_put(flow->rss);
	rte_free(flow);
	return 0;
}

/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow = LIST_FIRST(&priv->flows);

	while (flow) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->internal)
			mlx4_flow_destroy(dev, flow, error);
		flow = next;
	}
	return 0;
}

/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
{
	while (vlan < 4096) {
		if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
		    (UINT64_C(1) << (vlan % 64)))
			return vlan;
		++vlan;
	}
	return vlan;
}
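
/*
 * For example, with VLAN IDs 5 and 42 configured, successive calls yield:
 * mlx4_flow_internal_next_vlan(priv, 0) == 5,
 * mlx4_flow_internal_next_vlan(priv, 6) == 42,
 * mlx4_flow_internal_next_vlan(priv, 43) == 4096 (none left).
 */
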
/**
 * Generate internal flow rules.
 *
 * Various flow rules are created depending on the mode the device is in:
 *
 * 1. Promiscuous:
 *       port MAC + broadcast + catch-all (VLAN filtering is ignored).
 * 2. All multicast:
 *       port MAC/VLAN + broadcast + catch-all multicast.
 * 3. Otherwise:
 *       port MAC/VLAN + broadcast MAC/VLAN.
 *
 * About MAC flow rules:
 *
 * - MAC flow rules are generated from @p dev->data->mac_addrs
 *   (@p priv->mac array).
 * - An additional flow rule for Ethernet broadcasts is also generated.
 * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
 *   is enabled and VLAN filters are configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
{
1292 mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
1294 struct rte_flow_attr attr = {
1295 .priority = MLX4_FLOW_PRIORITY_LAST,
1298 struct rte_flow_item_eth eth_spec;
1299 const struct rte_flow_item_eth eth_mask = {
1300 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1302 const struct rte_flow_item_eth eth_allmulti = {
1303 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
1305 struct rte_flow_item_vlan vlan_spec;
1306 const struct rte_flow_item_vlan vlan_mask = {
1307 .tci = RTE_BE16(0x0fff),
1309 struct rte_flow_item pattern[] = {
1311 .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
1314 .type = RTE_FLOW_ITEM_TYPE_ETH,
1319 /* Replaced with VLAN if filtering is enabled. */
1320 .type = RTE_FLOW_ITEM_TYPE_END,
1323 .type = RTE_FLOW_ITEM_TYPE_END,
1327 * Round number of queues down to their previous power of 2 to
1328 * comply with RSS context limitations. Extra queues silently do not
1329 * get RSS by default.
1332 rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
1333 uint16_t queue[queues];
1334 struct rte_flow_action_rss action_rss = {
1335 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
1338 .key_len = MLX4_RSS_HASH_KEY_SIZE,
1339 .queue_num = queues,
1340 .key = mlx4_rss_hash_key_default,
1343 struct rte_flow_action actions[] = {
1345 .type = RTE_FLOW_ACTION_TYPE_RSS,
1346 .conf = &action_rss,
1349 .type = RTE_FLOW_ACTION_TYPE_END,
	struct ether_addr *rule_mac = &eth_spec.dst;
	rte_be16_t *rule_vlan =
		(priv->dev->data->dev_conf.rxmode.offloads &
		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
		!priv->dev->data->promiscuous ?
		&vlan_spec.tci :
		NULL;
	uint16_t vlan = 0;
	struct rte_flow *flow;
	unsigned int i;
	int err = 0;

	/* Nothing to be done if there are no Rx queues. */
	if (!queues)
		goto error;
	/* Prepare default RSS configuration. */
	for (i = 0; i != queues; ++i)
		queue[i] = i;
	/*
	 * Set up VLAN item if filtering is enabled and at least one VLAN
	 * filter is configured.
	 */
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, 0);
		if (vlan < 4096) {
			pattern[2] = (struct rte_flow_item){
				.type = RTE_FLOW_ITEM_TYPE_VLAN,
				.spec = &vlan_spec,
				.mask = &vlan_mask,
			};
next_vlan:
			*rule_vlan = rte_cpu_to_be_16(vlan);
		} else {
			rule_vlan = NULL;
		}
	}
	for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
		const struct ether_addr *mac;

		/* Broadcasts are handled by an extra iteration. */
		if (i < RTE_DIM(priv->mac))
			mac = &priv->mac[i];
		else
			mac = &eth_mask.dst;
		if (is_zero_ether_addr(mac))
			continue;
		/* Check if MAC flow rule is already present. */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			const struct ibv_flow_spec_eth *eth =
				(const void *)((uintptr_t)flow->ibv_attr +
					       sizeof(*flow->ibv_attr));
			unsigned int j;

			if (!flow->mac)
				continue;
			assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
			assert(flow->ibv_attr->num_of_specs == 1);
			assert(eth->type == IBV_FLOW_SPEC_ETH);
			assert(flow->rss);
			if (rule_vlan &&
			    (eth->val.vlan_tag != *rule_vlan ||
			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
				continue;
			if (!rule_vlan && eth->mask.vlan_tag)
				continue;
			for (j = 0; j != sizeof(mac->addr_bytes); ++j)
				if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
				    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
				    eth->val.src_mac[j] != UINT8_C(0x00) ||
				    eth->mask.src_mac[j] != UINT8_C(0x00))
					break;
			if (j != sizeof(mac->addr_bytes))
				continue;
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				continue;
			break;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			memcpy(rule_mac, mac, sizeof(*mac));
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		flow->select = 1;
		flow->mac = 1;
	}
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
		if (vlan < 4096)
			goto next_vlan;
	}
	/* Take care of promiscuous and all multicast flow rules. */
	if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			if (priv->dev->data->promiscuous) {
				if (flow->promisc)
					break;
			} else {
				assert(priv->dev->data->all_multicast);
				if (flow->allmulti)
					break;
			}
		}
		if (flow && flow->internal) {
			assert(flow->rss);
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				flow = NULL;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			if (priv->dev->data->promiscuous) {
				pattern[1].spec = NULL;
				pattern[1].mask = NULL;
			} else {
				assert(priv->dev->data->all_multicast);
				pattern[1].spec = &eth_allmulti;
				pattern[1].mask = &eth_allmulti;
			}
			pattern[2] = pattern[3];
			flow = mlx4_flow_create(priv->dev, &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		assert(flow->promisc || flow->allmulti);
		flow->select = 1;
	}
error:
	/* Clear selection and clean up stale internal flow rules. */
	flow = LIST_FIRST(&priv->flows);
	while (flow && flow->internal) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->select)
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
		else
			flow->select = 0;
		flow = next;
	}
	return err;
}

/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int ret;

	/* Internal flow rules are guaranteed to come first in the list. */
	if (priv->isolated) {
		/*
		 * Get rid of them in isolated mode, stop at the first
		 * non-internal rule found.
		 */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_FIRST(&priv->flows))
			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
	} else {
		/* Refresh internal rules. */
		ret = mlx4_flow_internal(priv, error);
		if (ret)
			return ret;
	}
	/* Toggle the remaining flow rules. */
	LIST_FOREACH(flow, &priv->flows, next) {
		ret = mlx4_flow_toggle(priv, flow, priv->started, error);
		if (ret)
			return ret;
	}
	if (!priv->started)
		assert(!priv->drop);
	return 0;
}

/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct priv *priv)
{
	struct rte_flow *flow;

	while ((flow = LIST_FIRST(&priv->flows)))
		mlx4_flow_destroy(priv->dev, flow, NULL);
	assert(LIST_EMPTY(&priv->rss));
}

static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};
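
/*
 * Applications do not call these handlers directly: the generic rte_flow
 * layer retrieves this table through the RTE_ETH_FILTER_GENERIC filter
 * type handled by mlx4_filter_ctrl() below, then dispatches to it.
 */
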
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}