/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data);

static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data);

static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data);

static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data);

static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data);

static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data);

static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
                       const void *default_mask,
                       void *data);
/** Structure for Drop queue. */
struct mlx5_hrxq_drop {
        struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
        struct ibv_qp *qp; /**< Verbs queue pair. */
        struct ibv_wq *wq; /**< Verbs work queue. */
        struct ibv_cq *cq; /**< Verbs completion queue. */
};
/* Flows structures. */
struct mlx5_flow {
        uint64_t hash_fields; /**< Fields that participate in the hash. */
        struct mlx5_hrxq *hrxq; /**< Hash Rx queue. */
};
/* Drop flows structures. */
struct mlx5_flow_drop {
        struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
};
struct rte_flow {
        TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
        uint32_t mark:1; /**< Set if the flow is marked. */
        uint32_t drop:1; /**< Drop queue. */
        struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
        struct ibv_flow *ibv_flow; /**< Verbs flow. */
        uint16_t queues_n; /**< Number of entries in queue[]. */
        uint16_t (*queues)[]; /**< Queues indexes to use. */
        union {
                struct mlx5_flow frxq; /**< Flow with Rx queue. */
                struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
        };
};
/** Static initializer for items. */
#define ITEMS(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }
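/*
 * For example, ITEMS(RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_TCP)
 * expands to the compound literal:
 *
 *      (const enum rte_flow_item_type []){
 *              RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_TCP,
 *              RTE_FLOW_ITEM_TYPE_END,
 *      }
 *
 * i.e. an END-terminated array usable as a node's list of successors.
 */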
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
        /** List of possible actions for these items. */
        const enum rte_flow_action_type *const actions;
        /** Bit-masks corresponding to the possibilities for the item. */
        const void *mask;
        /**
         * Default bit-masks to use when item->mask is not provided. When
         * \default_mask is also NULL, the full supported bit-mask (\mask) is
         * used instead.
         */
        const void *default_mask;
        /** Bit-masks size in bytes. */
        const unsigned int mask_sz;
        /**
         * Conversion function from rte_flow to NIC specific flow.
         *
         * @param item
         *   rte_flow item to convert.
         * @param default_mask
         *   Default bit-masks to use when item->mask is not provided.
         * @param data
         *   Internal structure to store the conversion.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*convert)(const struct rte_flow_item *item,
                       const void *default_mask,
                       void *data);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible following items. */
        const enum rte_flow_item_type *const items;
};
/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};
178 static const struct mlx5_flow_items mlx5_flow_items[] = {
179 [RTE_FLOW_ITEM_TYPE_END] = {
180 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
181 RTE_FLOW_ITEM_TYPE_VXLAN),
183 [RTE_FLOW_ITEM_TYPE_ETH] = {
184 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
185 RTE_FLOW_ITEM_TYPE_IPV4,
186 RTE_FLOW_ITEM_TYPE_IPV6),
187 .actions = valid_actions,
188 .mask = &(const struct rte_flow_item_eth){
189 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
190 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
193 .default_mask = &rte_flow_item_eth_mask,
194 .mask_sz = sizeof(struct rte_flow_item_eth),
195 .convert = mlx5_flow_create_eth,
196 .dst_sz = sizeof(struct ibv_flow_spec_eth),
198 [RTE_FLOW_ITEM_TYPE_VLAN] = {
199 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
200 RTE_FLOW_ITEM_TYPE_IPV6),
201 .actions = valid_actions,
202 .mask = &(const struct rte_flow_item_vlan){
205 .default_mask = &rte_flow_item_vlan_mask,
206 .mask_sz = sizeof(struct rte_flow_item_vlan),
207 .convert = mlx5_flow_create_vlan,
210 [RTE_FLOW_ITEM_TYPE_IPV4] = {
211 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
212 RTE_FLOW_ITEM_TYPE_TCP),
213 .actions = valid_actions,
214 .mask = &(const struct rte_flow_item_ipv4){
218 .type_of_service = -1,
222 .default_mask = &rte_flow_item_ipv4_mask,
223 .mask_sz = sizeof(struct rte_flow_item_ipv4),
224 .convert = mlx5_flow_create_ipv4,
225 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
227 [RTE_FLOW_ITEM_TYPE_IPV6] = {
228 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
229 RTE_FLOW_ITEM_TYPE_TCP),
230 .actions = valid_actions,
231 .mask = &(const struct rte_flow_item_ipv6){
234 0xff, 0xff, 0xff, 0xff,
235 0xff, 0xff, 0xff, 0xff,
236 0xff, 0xff, 0xff, 0xff,
237 0xff, 0xff, 0xff, 0xff,
240 0xff, 0xff, 0xff, 0xff,
241 0xff, 0xff, 0xff, 0xff,
242 0xff, 0xff, 0xff, 0xff,
243 0xff, 0xff, 0xff, 0xff,
250 .default_mask = &rte_flow_item_ipv6_mask,
251 .mask_sz = sizeof(struct rte_flow_item_ipv6),
252 .convert = mlx5_flow_create_ipv6,
253 .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
255 [RTE_FLOW_ITEM_TYPE_UDP] = {
256 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
257 .actions = valid_actions,
258 .mask = &(const struct rte_flow_item_udp){
264 .default_mask = &rte_flow_item_udp_mask,
265 .mask_sz = sizeof(struct rte_flow_item_udp),
266 .convert = mlx5_flow_create_udp,
267 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
269 [RTE_FLOW_ITEM_TYPE_TCP] = {
270 .actions = valid_actions,
271 .mask = &(const struct rte_flow_item_tcp){
277 .default_mask = &rte_flow_item_tcp_mask,
278 .mask_sz = sizeof(struct rte_flow_item_tcp),
279 .convert = mlx5_flow_create_tcp,
280 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
282 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
283 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
284 .actions = valid_actions,
285 .mask = &(const struct rte_flow_item_vxlan){
286 .vni = "\xff\xff\xff",
288 .default_mask = &rte_flow_item_vxlan_mask,
289 .mask_sz = sizeof(struct rte_flow_item_vxlan),
290 .convert = mlx5_flow_create_vxlan,
291 .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
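/*
 * Pattern parsing starts at mlx5_flow_items[RTE_FLOW_ITEM_TYPE_END]: each
 * node's .items array lists the item types allowed to follow it, so a
 * pattern such as ETH -> IPV4 -> UDP -> VXLAN -> ETH is accepted by walking
 * one edge of this graph per item, while an out-of-order pattern is
 * rejected as unsupported.
 */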
/* Structure to parse actions. */
struct mlx5_flow_action {
        uint32_t queue:1; /**< Target is a receive queue. */
        uint32_t drop:1; /**< Target is a drop queue. */
        uint32_t mark:1; /**< Mark is present in the flow. */
        uint32_t mark_id; /**< Mark identifier. */
        uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
        uint16_t queues_n; /**< Number of entries in queue[]. */
};
/** Structure to pass to the conversion function. */
struct mlx5_flow_parse {
        struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
        unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
        uint32_t inner; /**< Set once VXLAN is encountered. */
        uint64_t hash_fields; /**< Fields that participate in the hash. */
        struct mlx5_flow_action actions; /**< Parsed action result. */
};
static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .query = NULL,
        .isolate = mlx5_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        int ret = EINVAL;

        if (filter_type == RTE_ETH_FILTER_GENERIC) {
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &mlx5_flow_ops;
                return 0;
        }
        ERROR("%p: filter type (%d) not supported",
              (void *)dev, filter_type);
        return -ret;
}
/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success.
 */
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        int ret = 0;

        if (!item->spec && (item->mask || item->last))
                return -1;
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->mask) {
                unsigned int i;
                const uint8_t *spec = item->mask;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = mask;
                unsigned int i;

                if (item->mask)
                        apply = item->mask;
                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
        }
        return ret;
}
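/*
 * For instance, with a supported mask byte of 0xf0, a spec byte of 0x12
 * fails the check above ((0x12 | 0xf0) != 0xf0) because it sets bits the
 * NIC cannot match, while 0x10 passes. Ranges (spec/last) are only accepted
 * when both ends are identical once the mask is applied.
 */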
/**
 * Validate a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_validate(struct priv *priv,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item items[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error,
                   struct mlx5_flow_parse *flow)
{
        const struct mlx5_flow_items *cur_item = mlx5_flow_items;

        if (attr->group) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   NULL,
                                   "groups are not supported");
                return -rte_errno;
        }
        if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   NULL,
                                   "priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   NULL,
                                   "egress is not supported");
                return -rte_errno;
        }
        if (!attr->ingress) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   NULL,
                                   "only ingress is supported");
                return -rte_errno;
        }
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                const struct mlx5_flow_items *token = NULL;
                unsigned int i;
                int err;

                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                for (i = 0;
                     cur_item->items &&
                     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
                     ++i) {
                        if (cur_item->items[i] == items->type) {
                                token = &mlx5_flow_items[items->type];
                                break;
                        }
                }
                if (!token)
                        goto exit_item_not_supported;
                cur_item = token;
                err = mlx5_flow_item_validate(items,
                                              (const uint8_t *)cur_item->mask,
                                              cur_item->mask_sz);
                if (err)
                        goto exit_item_not_supported;
                if (flow->ibv_attr && cur_item->convert) {
                        err = cur_item->convert(items,
                                                (cur_item->default_mask ?
                                                 cur_item->default_mask :
                                                 cur_item->mask),
                                                flow);
                        if (err)
                                goto exit_item_not_supported;
                } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                        if (flow->inner) {
                                rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   items,
                                                   "cannot recognize multiple"
                                                   " VXLAN encapsulations");
                                return -rte_errno;
                        }
                        flow->inner = 1;
                }
                flow->offset += cur_item->dst_sz;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        flow->actions.drop = 1;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
                        uint16_t n;
                        uint16_t found = 0;

                        if (!queue || (queue->index > (priv->rxqs_n - 1)))
                                goto exit_action_not_supported;
                        for (n = 0; n < flow->actions.queues_n; ++n) {
                                if (flow->actions.queues[n] == queue->index) {
                                        found = 1;
                                        break;
                                }
                        }
                        if (flow->actions.queues_n > 1 && !found) {
                                rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           actions,
                                           "queue action not in RSS queues");
                                return -rte_errno;
                        }
                        if (!found) {
                                flow->actions.queue = 1;
                                flow->actions.queues_n = 1;
                                flow->actions.queues[0] = queue->index;
                        }
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        uint16_t n;

                        if (!rss || !rss->num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "no valid queues");
                                return -rte_errno;
                        }
                        if (flow->actions.queues_n == 1) {
                                uint16_t found = 0;

                                assert(flow->actions.queues_n);
                                for (n = 0; n < rss->num; ++n) {
                                        if (flow->actions.queues[0] ==
                                            rss->queue[n]) {
                                                found = 1;
                                                break;
                                        }
                                }
                                if (!found) {
                                        rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "queue action not in RSS"
                                                   " queues");
                                        return -rte_errno;
                                }
                        }
                        for (n = 0; n < rss->num; ++n) {
                                if (rss->queue[n] >= priv->rxqs_n) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "queue id > number of"
                                                   " queues");
                                        return -rte_errno;
                                }
                        }
                        flow->actions.queue = 1;
                        for (n = 0; n < rss->num; ++n)
                                flow->actions.queues[n] = rss->queue[n];
                        flow->actions.queues_n = rss->num;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (!mark) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "mark must be defined");
                                return -rte_errno;
                        } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
                                rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "mark must be between 0"
                                                   " and 16777199");
                                return -rte_errno;
                        }
                        flow->actions.mark = 1;
                        flow->actions.mark_id = mark->id;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
                        flow->actions.mark = 1;
                } else {
                        goto exit_action_not_supported;
                }
        }
        if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
                flow->offset += sizeof(struct ibv_flow_spec_action_tag);
        if (!flow->ibv_attr && flow->actions.drop)
                flow->offset += sizeof(struct ibv_flow_spec_action_drop);
        if (!flow->actions.queue && !flow->actions.drop) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "no valid action");
                return -rte_errno;
        }
        return 0;
exit_item_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                           items, "item not supported");
        return -rte_errno;
exit_action_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                           actions, "action not supported");
        return -rte_errno;
}
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item items[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        int ret;
        struct mlx5_flow_parse flow = {
                .offset = sizeof(struct ibv_flow_attr),
                .actions = {
                        .mark_id = MLX5_FLOW_MARK_DEFAULT,
                        .queues_n = 0,
                },
        };

        priv_lock(priv);
        ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
        priv_unlock(priv);
        return ret;
}
/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
        unsigned int i;

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 2;
        flow->hash_fields = 0;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *eth = (struct ibv_flow_spec_eth) {
                .type = flow->inner | IBV_FLOW_SPEC_ETH,
                .size = eth_size,
        };
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
        eth->val.ether_type = spec->type;
        memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
        eth->mask.ether_type = mask->type;
        /* Remove unwanted bits from values. */
        for (i = 0; i < ETHER_ADDR_LEN; ++i) {
                eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
                eth->val.src_mac[i] &= eth->mask.src_mac[i];
        }
        eth->val.ether_type &= eth->mask.ether_type;
        return 0;
}
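/*
 * Note that the convert callbacks overwrite flow->ibv_attr->priority as
 * layers stack up: 2 for L2, 1 for L3 (IPv4/IPv6) and 0 for L4 or tunnels,
 * so a rule matching more layers is inserted with a lower value, i.e. a
 * higher precedence, than a coarser one.
 */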
/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

        eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        eth->val.vlan_tag = spec->tci;
        eth->mask.vlan_tag = mask->tci;
        eth->val.vlan_tag &= eth->mask.vlan_tag;
        return 0;
}
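/*
 * A VLAN item consumes no space of its own in the Verbs attribute (its
 * dst_sz is 0 in mlx5_flow_items[]): the TCI is folded into the Ethernet
 * specification written just before it, found back at offset - eth_size.
 */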
/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_ipv4_ext *ipv4;
        unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 1;
        flow->hash_fields = (IBV_RX_HASH_SRC_IPV4 |
                             IBV_RX_HASH_DST_IPV4);
        ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *ipv4 = (struct ibv_flow_spec_ipv4_ext) {
                .type = flow->inner | IBV_FLOW_SPEC_IPV4_EXT,
                .size = ipv4_size,
        };
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        ipv4->val = (struct ibv_flow_ipv4_ext_filter){
                .src_ip = spec->hdr.src_addr,
                .dst_ip = spec->hdr.dst_addr,
                .proto = spec->hdr.next_proto_id,
                .tos = spec->hdr.type_of_service,
        };
        ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
                .src_ip = mask->hdr.src_addr,
                .dst_ip = mask->hdr.dst_addr,
                .proto = mask->hdr.next_proto_id,
                .tos = mask->hdr.type_of_service,
        };
        /* Remove unwanted bits from values. */
        ipv4->val.src_ip &= ipv4->mask.src_ip;
        ipv4->val.dst_ip &= ipv4->mask.dst_ip;
        ipv4->val.proto &= ipv4->mask.proto;
        ipv4->val.tos &= ipv4->mask.tos;
        return 0;
}
/**
 * Convert IPv6 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_ipv6 *ipv6;
        unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
        unsigned int i;

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 1;
        flow->hash_fields = (IBV_RX_HASH_SRC_IPV6 |
                             IBV_RX_HASH_DST_IPV6);
        ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *ipv6 = (struct ibv_flow_spec_ipv6) {
                .type = flow->inner | IBV_FLOW_SPEC_IPV6,
                .size = ipv6_size,
        };
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
               RTE_DIM(ipv6->val.src_ip));
        memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
               RTE_DIM(ipv6->val.dst_ip));
        memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
               RTE_DIM(ipv6->mask.src_ip));
        memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
               RTE_DIM(ipv6->mask.dst_ip));
        /*
         * Copy the remaining header fields from spec as well, otherwise
         * flow_label, next_hdr and hop_limit would always be matched
         * against zero.
         */
        ipv6->val.flow_label = spec->hdr.vtc_flow;
        ipv6->val.next_hdr = spec->hdr.proto;
        ipv6->val.hop_limit = spec->hdr.hop_limits;
        ipv6->mask.flow_label = mask->hdr.vtc_flow;
        ipv6->mask.next_hdr = mask->hdr.proto;
        ipv6->mask.hop_limit = mask->hdr.hop_limits;
        /* Remove unwanted bits from values. */
        for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
                ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
                ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
        }
        ipv6->val.flow_label &= ipv6->mask.flow_label;
        ipv6->val.next_hdr &= ipv6->mask.next_hdr;
        ipv6->val.hop_limit &= ipv6->mask.hop_limit;
        return 0;
}
/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_tcp_udp *udp;
        unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
                              IBV_RX_HASH_DST_PORT_UDP);
        udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *udp = (struct ibv_flow_spec_tcp_udp) {
                .type = flow->inner | IBV_FLOW_SPEC_UDP,
                .size = udp_size,
        };
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        udp->val.dst_port = spec->hdr.dst_port;
        udp->val.src_port = spec->hdr.src_port;
        udp->mask.dst_port = mask->hdr.dst_port;
        udp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        udp->val.src_port &= udp->mask.src_port;
        udp->val.dst_port &= udp->mask.dst_port;
        return 0;
}
/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_tcp_udp *tcp;
        unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
                              IBV_RX_HASH_DST_PORT_TCP);
        tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *tcp = (struct ibv_flow_spec_tcp_udp) {
                .type = flow->inner | IBV_FLOW_SPEC_TCP,
                .size = tcp_size,
        };
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        tcp->val.dst_port = spec->hdr.dst_port;
        tcp->val.src_port = spec->hdr.src_port;
        tcp->mask.dst_port = mask->hdr.dst_port;
        tcp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        tcp->val.src_port &= tcp->mask.src_port;
        tcp->val.dst_port &= tcp->mask.dst_port;
        return 0;
}
/**
 * Convert VXLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
                       const void *default_mask,
                       void *data)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_tunnel *vxlan;
        unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
        union vni {
                uint32_t vlan_id;
                uint8_t vni[4];
        } id;

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        id.vni[0] = 0;
        vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *vxlan = (struct ibv_flow_spec_tunnel) {
                .type = flow->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
                .size = size,
        };
        flow->inner = IBV_FLOW_SPEC_INNER;
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        memcpy(&id.vni[1], spec->vni, 3);
        vxlan->val.tunnel_id = id.vlan_id;
        memcpy(&id.vni[1], mask->vni, 3);
        vxlan->mask.tunnel_id = id.vlan_id;
        /* Remove unwanted bits from values. */
        vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
        return 0;
}
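/*
 * The 24-bit VNI is carried in bytes 1-3 of the 32-bit tunnel_id: byte 0 of
 * the union is cleared above, the three VNI bytes are copied after it, and
 * the resulting value is masked so only bits covered by the item mask are
 * matched. Setting flow->inner here makes every subsequent item apply to
 * the encapsulated (inner) headers.
 */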
/**
 * Convert mark/flag action to Verbs specification.
 *
 * @param flow
 *   Pointer to MLX5 flow structure.
 * @param mark_id
 *   Mark identifier.
 */
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
{
        struct ibv_flow_spec_action_tag *tag;
        unsigned int size = sizeof(struct ibv_flow_spec_action_tag);

        tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *tag = (struct ibv_flow_spec_action_tag){
                .type = IBV_FLOW_SPEC_ACTION_TAG,
                .size = size,
                .tag_id = mlx5_flow_mark_set(mark_id),
        };
        ++flow->ibv_attr->num_of_specs;
        return 0;
}
/**
 * Complete flow rule creation with a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   MLX5 flow attributes (filled by mlx5_flow_validate()).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue_drop(struct priv *priv,
                                   struct mlx5_flow_parse *flow,
                                   struct rte_flow_error *error)
{
        struct rte_flow *rte_flow;
        struct ibv_flow_spec_action_drop *drop;
        unsigned int size = sizeof(struct ibv_flow_spec_action_drop);

        assert(priv->pd);
        assert(priv->ctx);
        rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
        if (!rte_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }
        rte_flow->drop = 1;
        drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *drop = (struct ibv_flow_spec_action_drop){
                .type = IBV_FLOW_SPEC_ACTION_DROP,
                .size = size,
        };
        ++flow->ibv_attr->num_of_specs;
        flow->offset += sizeof(struct ibv_flow_spec_action_drop);
        rte_flow->ibv_attr = flow->ibv_attr;
        if (!priv->dev->data->dev_started)
                return rte_flow;
        rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
        rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
                                             rte_flow->ibv_attr);
        if (!rte_flow->ibv_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "flow rule creation failure");
                goto error;
        }
        return rte_flow;
error:
        assert(rte_flow);
        rte_free(rte_flow);
        return NULL;
}
/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   MLX5 flow attributes (filled by mlx5_flow_validate()).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
                              struct mlx5_flow_parse *flow,
                              struct rte_flow_error *error)
{
        struct rte_flow *rte_flow;
        unsigned int i;

        assert(priv->pd);
        assert(priv->ctx);
        assert(!flow->actions.drop);
        rte_flow =
                rte_calloc(__func__, 1,
                           sizeof(*rte_flow) +
                           flow->actions.queues_n * sizeof(uint16_t),
                           0);
        if (!rte_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }
        rte_flow->mark = flow->actions.mark;
        rte_flow->ibv_attr = flow->ibv_attr;
        rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
        memcpy(rte_flow->queues, flow->actions.queues,
               flow->actions.queues_n * sizeof(uint16_t));
        rte_flow->queues_n = flow->actions.queues_n;
        rte_flow->frxq.hash_fields = flow->hash_fields;
        rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
                                                 rss_hash_default_key_len,
                                                 flow->hash_fields,
                                                 (*rte_flow->queues),
                                                 rte_flow->queues_n);
        if (!rte_flow->frxq.hrxq) {
                rte_flow->frxq.hrxq =
                        mlx5_priv_hrxq_new(priv, rss_hash_default_key,
                                           rss_hash_default_key_len,
                                           flow->hash_fields,
                                           (*rte_flow->queues),
                                           rte_flow->queues_n);
                if (!rte_flow->frxq.hrxq) {
                        rte_flow_error_set(error, ENOMEM,
                                           RTE_FLOW_ERROR_TYPE_HANDLE,
                                           NULL, "cannot create hash rxq");
                        goto error;
                }
        }
        for (i = 0; i != flow->actions.queues_n; ++i) {
                struct mlx5_rxq_data *q =
                        (*priv->rxqs)[flow->actions.queues[i]];

                q->mark |= flow->actions.mark;
        }
        if (!priv->dev->data->dev_started)
                return rte_flow;
        rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
                                             rte_flow->ibv_attr);
        if (!rte_flow->ibv_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "flow rule creation failure");
                goto error;
        }
        return rte_flow;
error:
        assert(rte_flow);
        if (rte_flow->frxq.hrxq)
                mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
        rte_free(rte_flow);
        return NULL;
}
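/*
 * Both creation paths above return early with a successfully allocated flow
 * when the port is stopped: the Verbs rule itself is only instantiated once
 * the device starts, from priv_flow_start().
 */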
/**
 * Create a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise.
 */
static struct rte_flow *
priv_flow_create(struct priv *priv,
                 struct mlx5_flows *list,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct rte_flow *rte_flow;
        struct mlx5_flow_parse flow = {
                .offset = sizeof(struct ibv_flow_attr),
                .actions = {
                        .mark_id = MLX5_FLOW_MARK_DEFAULT,
                        .queues_n = 0,
                },
        };
        int err;

        err = priv_flow_validate(priv, attr, items, actions, error, &flow);
        if (err)
                goto exit;
        flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
        flow.offset = sizeof(struct ibv_flow_attr);
        if (!flow.ibv_attr) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate ibv_attr memory");
                goto exit;
        }
        *flow.ibv_attr = (struct ibv_flow_attr){
                .type = IBV_FLOW_ATTR_NORMAL,
                .size = sizeof(struct ibv_flow_attr),
                .priority = attr->priority,
                .num_of_specs = 0,
                .port = 0,
                .flags = 0,
        };
        flow.inner = 0;
        flow.hash_fields = 0;
        claim_zero(priv_flow_validate(priv, attr, items, actions,
                                      error, &flow));
        if (flow.actions.mark && !flow.actions.drop) {
                mlx5_flow_create_flag_mark(&flow, flow.actions.mark_id);
                flow.offset += sizeof(struct ibv_flow_spec_action_tag);
        }
        if (flow.actions.drop)
                rte_flow =
                        priv_flow_create_action_queue_drop(priv, &flow, error);
        else
                rte_flow = priv_flow_create_action_queue(priv, &flow, error);
        if (!rte_flow)
                goto exit;
        TAILQ_INSERT_TAIL(list, rte_flow, next);
        DEBUG("Flow created %p", (void *)rte_flow);
        return rte_flow;
exit:
        rte_free(flow.ibv_attr);
        return NULL;
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow;

        priv_lock(priv);
        flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
                                error);
        priv_unlock(priv);
        return flow;
}
/**
 * Destroy a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
priv_flow_destroy(struct priv *priv,
                  struct mlx5_flows *list,
                  struct rte_flow *flow)
{
        unsigned int i;
        uint16_t *queues;
        uint16_t queues_n;

        if (flow->drop || !flow->mark)
                goto free;
        queues = flow->frxq.hrxq->ind_table->queues;
        queues_n = flow->frxq.hrxq->ind_table->queues_n;
        for (i = 0; i != queues_n; ++i) {
                struct rte_flow *tmp;
                struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
                int mark = 0;

                /*
                 * To remove the mark from the queue, the queue must not be
                 * present in any other marked flow (RSS or not).
                 */
                TAILQ_FOREACH(tmp, list, next) {
                        unsigned int j;

                        if (!tmp->mark)
                                continue;
                        for (j = 0;
                             (j != tmp->frxq.hrxq->ind_table->queues_n) &&
                             !mark;
                             j++)
                                if (tmp->frxq.hrxq->ind_table->queues[j] ==
                                    queues[i])
                                        mark = 1;
                }
                rxq_data->mark = mark;
        }
free:
        if (flow->ibv_flow)
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
        if (!flow->drop)
                mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
        TAILQ_REMOVE(list, flow, next);
        rte_free(flow->ibv_attr);
        DEBUG("Flow destroyed %p", (void *)flow);
        rte_free(flow);
}
/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        (void)error;
        priv_lock(priv);
        priv_flow_destroy(priv, &priv->flows, flow);
        priv_unlock(priv);
        return 0;
}
/**
 * Destroy all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
        while (!TAILQ_EMPTY(list)) {
                struct rte_flow *flow;

                flow = TAILQ_FIRST(list);
                priv_flow_destroy(priv, list, flow);
        }
}
/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        (void)error;
        priv_lock(priv);
        priv_flow_flush(priv, &priv->flows);
        priv_unlock(priv);
        return 0;
}
/**
 * Create drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success.
 */
int
priv_flow_create_drop_queue(struct priv *priv)
{
        struct mlx5_hrxq_drop *fdq = NULL;

        assert(priv->pd);
        assert(priv->ctx);
        fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
        if (!fdq) {
                WARN("cannot allocate memory for drop queue");
                goto error;
        }
        fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!fdq->cq) {
                WARN("cannot allocate CQ for drop queue");
                goto error;
        }
        fdq->wq = ibv_create_wq(priv->ctx,
                        &(struct ibv_wq_init_attr){
                        .wq_type = IBV_WQT_RQ,
                        .max_wr = 1,
                        .max_sge = 1,
                        .pd = priv->pd,
                        .cq = fdq->cq,
                        });
        if (!fdq->wq) {
                WARN("cannot allocate WQ for drop queue");
                goto error;
        }
        fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
                        &(struct ibv_rwq_ind_table_init_attr){
                        .log_ind_tbl_size = 0,
                        .ind_tbl = &fdq->wq,
                        .comp_mask = 0,
                        });
        if (!fdq->ind_table) {
                WARN("cannot allocate indirection table for drop queue");
                goto error;
        }
        fdq->qp = ibv_create_qp_ex(priv->ctx,
                &(struct ibv_qp_init_attr_ex){
                        .qp_type = IBV_QPT_RAW_PACKET,
                        .comp_mask =
                                IBV_QP_INIT_ATTR_PD |
                                IBV_QP_INIT_ATTR_IND_TABLE |
                                IBV_QP_INIT_ATTR_RX_HASH,
                        .rx_hash_conf = (struct ibv_rx_hash_conf){
                                .rx_hash_function =
                                        IBV_RX_HASH_FUNC_TOEPLITZ,
                                .rx_hash_key_len = rss_hash_default_key_len,
                                .rx_hash_key = rss_hash_default_key,
                                .rx_hash_fields_mask = 0,
                                },
                        .rwq_ind_tbl = fdq->ind_table,
                        .pd = priv->pd,
                });
        if (!fdq->qp) {
                WARN("cannot allocate QP for drop queue");
                goto error;
        }
        priv->flow_drop_queue = fdq;
        return 0;
error:
        if (fdq) {
                if (fdq->qp)
                        claim_zero(ibv_destroy_qp(fdq->qp));
                if (fdq->ind_table)
                        claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
                if (fdq->wq)
                        claim_zero(ibv_destroy_wq(fdq->wq));
                if (fdq->cq)
                        claim_zero(ibv_destroy_cq(fdq->cq));
                rte_free(fdq);
        }
        priv->flow_drop_queue = NULL;
        return -1;
}
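/*
 * The drop queue is a minimal but complete Verbs object set (CQ, WQ,
 * indirection table and hash QP): drop rules need a valid QP to attach to,
 * so packets are steered to a work queue that is never polled, which
 * effectively discards them.
 */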
/**
 * Delete drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_flow_delete_drop_queue(struct priv *priv)
{
        struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;

        if (!fdq)
                return;
        if (fdq->qp)
                claim_zero(ibv_destroy_qp(fdq->qp));
        if (fdq->ind_table)
                claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
        if (fdq->wq)
                claim_zero(ibv_destroy_wq(fdq->wq));
        if (fdq->cq)
                claim_zero(ibv_destroy_cq(fdq->cq));
        rte_free(fdq);
        priv->flow_drop_queue = NULL;
}
/**
 * Remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
        struct rte_flow *flow;

        TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
                flow->ibv_flow = NULL;
                /* Clear Rx queue marks before releasing the hash Rx queue. */
                if (flow->mark) {
                        struct mlx5_ind_table_ibv *ind_tbl =
                                flow->frxq.hrxq->ind_table;
                        unsigned int n;

                        for (n = 0; n < ind_tbl->queues_n; ++n)
                                (*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
                }
                mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
                flow->frxq.hrxq = NULL;
                DEBUG("Flow %p removed", (void *)flow);
        }
}
/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
        struct rte_flow *flow;

        TAILQ_FOREACH(flow, list, next) {
                if (flow->frxq.hrxq)
                        goto flow_create;
                flow->frxq.hrxq =
                        mlx5_priv_hrxq_get(priv, rss_hash_default_key,
                                           rss_hash_default_key_len,
                                           flow->frxq.hash_fields,
                                           (*flow->queues),
                                           flow->queues_n);
                if (flow->frxq.hrxq)
                        goto flow_create;
                flow->frxq.hrxq =
                        mlx5_priv_hrxq_new(priv, rss_hash_default_key,
                                           rss_hash_default_key_len,
                                           flow->frxq.hash_fields,
                                           (*flow->queues),
                                           flow->queues_n);
                if (!flow->frxq.hrxq) {
                        DEBUG("Flow %p cannot be applied",
                              (void *)flow);
                        rte_errno = EINVAL;
                        return rte_errno;
                }
flow_create:
                flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
                                                 flow->ibv_attr);
                if (!flow->ibv_flow) {
                        DEBUG("Flow %p cannot be applied", (void *)flow);
                        rte_errno = EINVAL;
                        return rte_errno;
                }
                DEBUG("Flow %p applied", (void *)flow);
                if (flow->mark) {
                        unsigned int n;

                        for (n = 0;
                             n < flow->frxq.hrxq->ind_table->queues_n;
                             ++n) {
                                uint16_t idx =
                                        flow->frxq.hrxq->ind_table->queues[n];

                                (*priv->rxqs)[idx]->mark = 1;
                        }
                }
        }
        return 0;
}
/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        priv_lock(priv);
        if (dev->data->dev_started) {
                rte_flow_error_set(error, EBUSY,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "port must be stopped first");
                priv_unlock(priv);
                return -rte_errno;
        }
        priv->isolated = !!enable;
        priv_unlock(priv);
        return 0;
}
/**
 * Verify the flow list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of flows not released.
 */
int
priv_flow_verify(struct priv *priv)
{
        struct rte_flow *flow;
        int ret = 0;

        TAILQ_FOREACH(flow, &priv->flows, next) {
                DEBUG("%p: flow %p still referenced", (void *)priv,
                      (void *)flow);
                ++ret;
        }
        return ret;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
                    struct rte_flow_item_eth *eth_spec,
                    struct rte_flow_item_eth *eth_mask,
                    struct rte_flow_item_vlan *vlan_spec,
                    struct rte_flow_item_vlan *vlan_mask)
{
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = MLX5_CTRL_FLOW_PRIORITY,
        };
        struct rte_flow_item items[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .spec = eth_spec,
                        .last = NULL,
                        .mask = eth_mask,
                },
                {
                        .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
                                RTE_FLOW_ITEM_TYPE_END,
                        .spec = vlan_spec,
                        .last = NULL,
                        .mask = vlan_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
        };
        struct rte_flow_action actions[] = {
                {
                        .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                        .conf = &(struct rte_flow_action_queue){
                                .index = 0,
                        },
                },
                {
                        .type = RTE_FLOW_ACTION_TYPE_END,
                },
        };
        struct rte_flow *flow;
        struct rte_flow_error error;

        flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
                                &error);
        if (!flow)
                return rte_errno;
        return 0;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
               struct rte_flow_item_eth *eth_spec,
               struct rte_flow_item_eth *eth_mask)
{
        return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
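/*
 * Illustrative usage (a sketch, not part of this file): start-up code can
 * enable a unicast MAC control flow with something like
 *
 *      struct rte_flow_item_eth spec = { .dst.addr_bytes = { ... } };
 *      struct rte_flow_item_eth mask = {
 *              .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *      };
 *
 *      if (mlx5_ctrl_flow(dev, &spec, &mask))
 *              ... handle rte_errno ...
 *
 * which creates a QUEUE action rule toward queue 0 in priv->ctrl_flows.
 */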