/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_prm.h"
/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4
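/*
 * Note (illustrative, not in the original sources): Verbs flow priorities
 * are ordered with 0 as the strongest match, so control plane rules at
 * priority 4 are only hit when no application rule created at the default
 * priority (0) matches first.
 */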
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
/** Structure for drop queue. */
struct mlx5_hrxq_drop {
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_wq *wq; /**< Verbs work queue. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
};
/* Flow structures. */
struct mlx5_flow {
	uint64_t hash_fields; /**< Fields that participate in the hash. */
	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
	struct ibv_flow *ibv_flow; /**< Verbs flow. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue. */
};
/* Drop flow structures. */
struct mlx5_flow_drop {
	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
	struct ibv_flow *ibv_flow; /**< Verbs flow. */
	struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
};
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	uint32_t mark:1; /**< Set if the flow is marked. */
	uint32_t drop:1; /**< Drop queue. */
	uint16_t queues_n; /**< Number of entries in queue[]. */
	uint16_t (*queues)[]; /**< Queues indexes to use. */
	union {
		struct mlx5_flow frxq; /**< Flow with Rx queue. */
		struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
	};
};
/** Static initializer for items. */
#define ITEMS(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/**
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Conversion function from rte_flow to NIC specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
};
/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_END,
};
/** Graph of supported items and associated actions. */
static const struct mlx5_flow_items mlx5_flow_items[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VXLAN),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.type = -1,
		},
		.default_mask = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.convert = mlx5_flow_create_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vlan){
			.tci = -1,
		},
		.default_mask = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.convert = mlx5_flow_create_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = -1,
				.dst_addr = -1,
				.type_of_service = -1,
				.next_proto_id = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.convert = mlx5_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.src_addr = {
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
				},
				.dst_addr = {
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
				},
				.vtc_flow = -1,
				.proto = -1,
				.hop_limits = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv6_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.convert = mlx5_flow_create_ipv6,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv6),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.convert = mlx5_flow_create_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.convert = mlx5_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vxlan){
			.vni = "\xff\xff\xff",
		},
		.default_mask = &rte_flow_item_vxlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vxlan),
		.convert = mlx5_flow_create_vxlan,
		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
	},
};
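/*
 * Example (illustrative, not in the original sources): parsing starts from
 * the END entry above, so the graph accepts patterns such as
 * ETH -> IPV4 -> UDP -> VXLAN -> ETH -> IPV4 -> TCP, i.e. an inner
 * TCP/IPv4 flow carried in a VXLAN tunnel; each item only lists the items
 * allowed to follow it.
 */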
/** Structure to pass to the conversion function. */
struct mlx5_flow_parse {
	struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
	uint32_t inner; /**< Set once VXLAN is encountered. */
	uint32_t create:1; /**< Leave allocated resources on exit. */
	uint32_t queue:1; /**< Target is a receive queue. */
	uint32_t drop:1; /**< Target is a drop queue. */
	uint32_t mark:1; /**< Mark is present in the flow. */
	uint32_t mark_id; /**< Mark identifier. */
	uint64_t hash_fields; /**< Fields that participate in the hash. */
	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
	uint16_t queues_n; /**< Number of entries in queue[]. */
};
static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.query = NULL,
	.isolate = mlx5_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = EINVAL;

	if (filter_type == RTE_ETH_FILTER_GENERIC) {
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &mlx5_flow_ops;
		return 0;
	}
	ERROR("%p: filter type (%d) not supported",
	      (void *)dev, filter_type);
	return -ret;
}
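/*
 * Note (illustrative): the generic rte_flow layer reaches this PMD through
 * the filter_ctrl callback with RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET,
 * which hands back a pointer to mlx5_flow_ops above; every rte_flow_*()
 * entry point is then dispatched through that table.
 */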
/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-Mask size in bytes.
 *
 * @return
 *   0 on success.
 */
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	int ret = 0;

	if (!item->spec && (item->mask || item->last))
		return -1;
	if (item->spec && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->spec;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->last && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->last;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->mask) {
		unsigned int i;
		const uint8_t *spec = item->mask;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		const uint8_t *apply = mask;
		unsigned int i;

		if (item->mask)
			apply = item->mask;
		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
		}
		ret = memcmp(spec, last, size);
	}
	return ret;
}
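/*
 * Example (illustrative): an item whose spec, last or mask sets a bit
 * outside the supported mask is rejected, and so is a spec/last range whose
 * masked bounds differ, since range matching is not supported here; a range
 * where spec equals last after masking degenerates to an exact match and
 * passes.
 */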
/**
 * Validate and convert a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_convert(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct mlx5_flow_parse *parser)
{
	const struct mlx5_flow_items *cur_item = mlx5_flow_items;

	/* First step. Validate the attributes and actions. */
	*parser = (struct mlx5_flow_parse){
		.ibv_attr = parser->ibv_attr,
		.create = parser->create,
		.offset = sizeof(struct ibv_flow_attr),
		.mark_id = MLX5_FLOW_MARK_DEFAULT,
	};
	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			parser->drop = 1;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;
			uint16_t n;
			uint16_t found = 0;

			if (!queue || (queue->index > (priv->rxqs_n - 1)))
				goto exit_action_not_supported;
			for (n = 0; n < parser->queues_n; ++n) {
				if (parser->queues[n] == queue->index) {
					found = 1;
					break;
				}
			}
			if (parser->queues_n > 1 && !found) {
				rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "queue action not in RSS queues");
				return -rte_errno;
			}
			if (!found) {
				parser->queue = 1;
				parser->queues_n = 1;
				parser->queues[0] = queue->index;
			}
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			uint16_t n;

			if (!rss || !rss->num) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "no valid queues");
				return -rte_errno;
			}
			if (parser->queues_n == 1) {
				uint16_t found = 0;

				assert(parser->queues_n);
				for (n = 0; n < rss->num; ++n) {
					if (parser->queues[0] ==
					    rss->queue[n]) {
						found = 1;
						break;
					}
				}
				if (!found) {
					rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "queue action not in RSS"
						   " queues");
					return -rte_errno;
				}
			}
			for (n = 0; n < rss->num; ++n) {
				if (rss->queue[n] >= priv->rxqs_n) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "queue id > number of"
						   " queues");
					return -rte_errno;
				}
			}
			parser->queue = 1;
			for (n = 0; n < rss->num; ++n)
				parser->queues[n] = rss->queue[n];
			parser->queues_n = rss->num;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (!mark) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "mark must be defined");
				return -rte_errno;
			} else if (mark->id >= MLX5_FLOW_MARK_MAX) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "mark must be between 0"
						   " and 16777199");
				return -rte_errno;
			}
			parser->mark = 1;
			parser->mark_id = mark->id;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
			parser->mark = 1;
		} else {
			goto exit_action_not_supported;
		}
	}
	if (parser->mark && !parser->ibv_attr && !parser->drop)
		parser->offset += sizeof(struct ibv_flow_spec_action_tag);
	if (!parser->ibv_attr && parser->drop)
		parser->offset += sizeof(struct ibv_flow_spec_action_drop);
	if (!parser->queue && !parser->drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	/* Second step. Validate the items and accumulate/fill the specs. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		const struct mlx5_flow_items *token = NULL;
		unsigned int i;
		int err;

		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		for (i = 0;
		     cur_item->items &&
		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
		     ++i) {
			if (cur_item->items[i] == items->type) {
				token = &mlx5_flow_items[items->type];
				break;
			}
		}
		if (!token)
			goto exit_item_not_supported;
		cur_item = token;
		err = mlx5_flow_item_validate(items,
					      (const uint8_t *)cur_item->mask,
					      cur_item->mask_sz);
		if (err)
			goto exit_item_not_supported;
		if (parser->ibv_attr && cur_item->convert) {
			err = cur_item->convert(items,
						(cur_item->default_mask ?
						 cur_item->default_mask :
						 cur_item->mask),
						parser);
			if (err)
				goto exit_item_not_supported;
		} else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
			if (parser->inner) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   items,
						   "cannot recognize multiple"
						   " VXLAN encapsulations");
				return -rte_errno;
			}
			parser->inner = 1;
		}
		parser->offset += cur_item->dst_sz;
	}
	return 0;
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   items, "item not supported");
	return -rte_errno;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
	return -rte_errno;
}
/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
	unsigned int i;

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields = 0;
	eth = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*eth = (struct ibv_flow_spec_eth) {
		.type = parser->inner | IBV_FLOW_SPEC_ETH,
		.size = eth_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	eth->val.ether_type = spec->type;
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	eth->mask.ether_type = mask->type;
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
	}
	eth->val.ether_type &= eth->mask.ether_type;
	return 0;
}
/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	eth = (void *)((uintptr_t)parser->ibv_attr + parser->offset - eth_size);
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
}
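/*
 * Note (illustrative): VLAN has no Verbs specification of its own; the TCI
 * is folded into the Ethernet specification written just before it, which
 * is why the pointer above rewinds by eth_size and why the graph entry
 * declares a zero dst_sz for this item.
 */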
/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_ipv4_ext *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields = (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4);
	ipv4 = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*ipv4 = (struct ibv_flow_spec_ipv4_ext) {
		.type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
		.size = ipv4_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	ipv4->val = (struct ibv_flow_ipv4_ext_filter){
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
		.proto = spec->hdr.next_proto_id,
		.tos = spec->hdr.type_of_service,
	};
	ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
		.proto = mask->hdr.next_proto_id,
		.tos = mask->hdr.type_of_service,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	ipv4->val.proto &= ipv4->mask.proto;
	ipv4->val.tos &= ipv4->mask.tos;
	return 0;
}
/**
 * Convert IPv6 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_ipv6 *ipv6;
	unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
	unsigned int i;

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields = (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6);
	ipv6 = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*ipv6 = (struct ibv_flow_spec_ipv6) {
		.type = parser->inner | IBV_FLOW_SPEC_IPV6,
		.size = ipv6_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
	       RTE_DIM(ipv6->val.src_ip));
	memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
	       RTE_DIM(ipv6->val.dst_ip));
	memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
	       RTE_DIM(ipv6->mask.src_ip));
	memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
	       RTE_DIM(ipv6->mask.dst_ip));
	ipv6->mask.flow_label = mask->hdr.vtc_flow;
	ipv6->mask.next_hdr = mask->hdr.proto;
	ipv6->mask.hop_limit = mask->hdr.hop_limits;
	/* Remove unwanted bits from values. */
	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
	}
	ipv6->val.flow_label &= ipv6->mask.flow_label;
	ipv6->val.next_hdr &= ipv6->mask.next_hdr;
	ipv6->val.hop_limit &= ipv6->mask.hop_limit;
	return 0;
}
/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
				IBV_RX_HASH_DST_PORT_UDP);
	udp = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = parser->inner | IBV_FLOW_SPEC_UDP,
		.size = udp_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
}
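/*
 * Note (illustrative): the L4 conversions OR their bits into
 * parser->hash_fields on top of the IPv4/IPv6 assignment, while the
 * Ethernet conversion resets the field, so after parsing an ETH/IP/L4
 * chain the hash covers the full 4-tuple.
 */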
/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
				IBV_RX_HASH_DST_PORT_TCP);
	tcp = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = parser->inner | IBV_FLOW_SPEC_TCP,
		.size = tcp_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
}
/**
 * Convert VXLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_tunnel *vxlan;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id;

	++parser->ibv_attr->num_of_specs;
	id.vni[0] = 0;
	vxlan = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*vxlan = (struct ibv_flow_spec_tunnel) {
		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	parser->inner = IBV_FLOW_SPEC_INNER;
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(&id.vni[1], spec->vni, 3);
	vxlan->val.tunnel_id = id.vlan_id;
	memcpy(&id.vni[1], mask->vni, 3);
	vxlan->mask.tunnel_id = id.vlan_id;
	/* Remove unwanted bits from values. */
	vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
	return 0;
}
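/*
 * Note (illustrative): the 24-bit VNI is copied into bytes 1-3 of the union
 * with byte 0 cleared, so reading it back as a 32-bit integer yields the
 * tunnel_id layout Verbs expects. Setting parser->inner here flags every
 * later item as part of the encapsulated packet.
 */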
/**
 * Convert mark/flag action to Verbs specification.
 *
 * @param parser
 *   Internal parser structure.
 * @param mark_id
 *   Mark identifier.
 */
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
{
	struct ibv_flow_spec_action_tag *tag;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);

	assert(parser->mark);
	tag = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*tag = (struct ibv_flow_spec_action_tag){
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark_id),
	};
	++parser->ibv_attr->num_of_specs;
	parser->offset += size;
	return 0;
}
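/*
 * Note (illustrative): mlx5_flow_mark_set() re-encodes the user mark so
 * that the value reported in the Rx completion entry cannot collide with
 * the "no mark" encoding, which is why the accepted range above stops at
 * 16777199 instead of covering the full 24 bits.
 */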
/**
 * Complete flow rule creation with a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param parser
 *   Internal parser structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue_drop(struct priv *priv,
				   struct mlx5_flow_parse *parser,
				   struct rte_flow_error *error)
{
	struct rte_flow *rte_flow;
	struct ibv_flow_spec_action_drop *drop;
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);

	assert(priv->pd);
	assert(priv->ctx);
	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	rte_flow->drop = 1;
	drop = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*drop = (struct ibv_flow_spec_action_drop){
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};
	++parser->ibv_attr->num_of_specs;
	parser->offset += sizeof(struct ibv_flow_spec_action_drop);
	rte_flow->drxq.ibv_attr = parser->ibv_attr;
	if (!priv->dev->data->dev_started)
		return rte_flow;
	rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
	rte_flow->drxq.ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
						  rte_flow->drxq.ibv_attr);
	if (!rte_flow->drxq.ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	assert(rte_flow);
	rte_free(rte_flow);
	return NULL;
}
/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param parser
 *   MLX5 flow parser attributes (filled by mlx5_flow_validate()).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
			      struct mlx5_flow_parse *parser,
			      struct rte_flow_error *error)
{
	struct rte_flow *rte_flow;
	unsigned int i;

	assert(priv->pd);
	assert(priv->ctx);
	assert(!parser->drop);
	rte_flow = rte_calloc(__func__, 1,
			      sizeof(*rte_flow) +
			      parser->queues_n * sizeof(uint16_t),
			      0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	rte_flow->mark = parser->mark;
	rte_flow->frxq.ibv_attr = parser->ibv_attr;
	rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
	memcpy(rte_flow->queues, parser->queues,
	       parser->queues_n * sizeof(uint16_t));
	rte_flow->queues_n = parser->queues_n;
	rte_flow->frxq.hash_fields = parser->hash_fields;
	rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
						 rss_hash_default_key_len,
						 parser->hash_fields,
						 (*rte_flow->queues),
						 rte_flow->queues_n);
	if (!rte_flow->frxq.hrxq) {
		rte_flow->frxq.hrxq =
			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					   rss_hash_default_key_len,
					   parser->hash_fields,
					   (*rte_flow->queues),
					   rte_flow->queues_n);
		if (!rte_flow->frxq.hrxq) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "cannot create hash rxq");
			goto error;
		}
	}
	for (i = 0; i != parser->queues_n; ++i) {
		struct mlx5_rxq_data *q =
			(*priv->rxqs)[parser->queues[i]];

		q->mark |= parser->mark;
	}
	if (!priv->dev->data->dev_started)
		return rte_flow;
	rte_flow->frxq.ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
						  rte_flow->frxq.ibv_attr);
	if (!rte_flow->frxq.ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	assert(rte_flow);
	if (rte_flow->frxq.hrxq)
		mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
	rte_free(rte_flow);
	return NULL;
}
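/*
 * Note (illustrative): mlx5_priv_hrxq_get() above looks up an existing hash
 * Rx queue matching the key, hash fields and queue list and takes a
 * reference on it; mlx5_priv_hrxq_new() is only attempted when no
 * compatible hash Rx queue exists yet, so identical flows share Verbs
 * objects.
 */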
/**
 * Validate a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in,out] parser
 *   MLX5 parser structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
priv_flow_validate(struct priv *priv,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error,
		   struct mlx5_flow_parse *parser)
{
	int err;

	/* First pass: validate and compute the specification size. */
	err = priv_flow_convert(priv, attr, items, actions, error, parser);
	if (err)
		goto exit;
	if (parser->mark)
		parser->offset += sizeof(struct ibv_flow_spec_action_tag);
	parser->ibv_attr = rte_malloc(__func__, parser->offset, 0);
	if (!parser->ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
		err = ENOMEM;
		goto exit;
	}
	*parser->ibv_attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_flow_attr),
		.priority = attr->priority,
		.num_of_specs = 0,
		.port = 0,
		.flags = 0,
	};
	/* Second pass: fill the Verbs specifications. */
	err = priv_flow_convert(priv, attr, items, actions, error, parser);
	if (err)
		goto exit;
	if (parser->mark)
		mlx5_flow_create_flag_mark(parser, parser->mark_id);
	if (parser->create)
		/* The caller takes ownership of parser->ibv_attr. */
		return 0;
exit:
	if (parser->ibv_attr) {
		rte_free(parser->ibv_attr);
		parser->ibv_attr = NULL;
	}
	return err;
}
/**
 * Create a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise.
 */
static struct rte_flow *
priv_flow_create(struct priv *priv,
		 struct mlx5_flows *list,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mlx5_flow_parse parser = { .create = 1, };
	struct rte_flow *flow;
	int err;

	err = priv_flow_validate(priv, attr, items, actions, error, &parser);
	if (err)
		goto exit;
	if (parser.drop)
		flow = priv_flow_create_action_queue_drop(priv, &parser,
							  error);
	else
		flow = priv_flow_create_action_queue(priv, &parser, error);
	if (!flow)
		goto exit;
	TAILQ_INSERT_TAIL(list, flow, next);
	DEBUG("Flow created %p", (void *)flow);
	return flow;
exit:
	if (parser.ibv_attr)
		rte_free(parser.ibv_attr);
	return NULL;
}
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int ret;
	struct mlx5_flow_parse parser = { .create = 0, };

	priv_lock(priv);
	ret = priv_flow_validate(priv, attr, items, actions, error, &parser);
	priv_unlock(priv);
	return ret;
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;

	priv_lock(priv);
	flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
				error);
	priv_unlock(priv);
	return flow;
}
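/*
 * Example (illustrative, not part of the driver): an application can drop
 * all ingress TCP traffic on a port with something like:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f =
 *       rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * which lands in mlx5_flow_create() above through mlx5_flow_ops.
 */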
/**
 * Destroy a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
priv_flow_destroy(struct priv *priv,
		  struct mlx5_flows *list,
		  struct rte_flow *flow)
{
	unsigned int i;
	uint16_t *queues;
	uint16_t queues_n;

	if (flow->drop || !flow->mark)
		goto free;
	queues = flow->frxq.hrxq->ind_table->queues;
	queues_n = flow->frxq.hrxq->ind_table->queues_n;
	for (i = 0; i != queues_n; ++i) {
		struct rte_flow *tmp;
		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
		int mark = 0;

		/*
		 * To remove the mark from the queue, the queue must not be
		 * present in any other marked flow (RSS or not).
		 */
		TAILQ_FOREACH(tmp, list, next) {
			unsigned int j;

			if (!tmp->mark)
				continue;
			for (j = 0;
			     (j != tmp->frxq.hrxq->ind_table->queues_n) &&
			     !mark;
			     j++)
				if (tmp->frxq.hrxq->ind_table->queues[j] ==
				    queues[i])
					mark = 1;
		}
		rxq_data->mark = mark;
	}
free:
	if (flow->drop) {
		if (flow->drxq.ibv_flow)
			claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
		rte_free(flow->drxq.ibv_attr);
	} else {
		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
		if (flow->frxq.ibv_flow)
			claim_zero(ibv_destroy_flow(flow->frxq.ibv_flow));
		rte_free(flow->frxq.ibv_attr);
	}
	TAILQ_REMOVE(list, flow, next);
	DEBUG("Flow destroyed %p", (void *)flow);
	rte_free(flow);
}
/**
 * Destroy all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		priv_flow_destroy(priv, list, flow);
	}
}
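/*
 * Note (illustrative): a Verbs flow rule must always be attached to a QP.
 * The "drop queue" created below is a minimal single-WQE QP that is never
 * polled; flows carrying the drop specification use it purely as an attach
 * point.
 */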
/**
 * Create drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success.
 */
static int
priv_flow_create_drop_queue(struct priv *priv)
{
	struct mlx5_hrxq_drop *fdq = NULL;

	assert(priv->pd);
	assert(priv->ctx);
	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
	if (!fdq) {
		WARN("cannot allocate memory for drop queue");
		goto error;
	}
	fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!fdq->cq) {
		WARN("cannot allocate CQ for drop queue");
		goto error;
	}
	fdq->wq = ibv_create_wq(priv->ctx,
			&(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = priv->pd,
			.cq = fdq->cq,
			});
	if (!fdq->wq) {
		WARN("cannot allocate WQ for drop queue");
		goto error;
	}
	fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
			&(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &fdq->wq,
			.comp_mask = 0,
			});
	if (!fdq->ind_table) {
		WARN("cannot allocate indirection table for drop queue");
		goto error;
	}
	fdq->qp = ibv_create_qp_ex(priv->ctx,
		&(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = rss_hash_default_key_len,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
				},
			.rwq_ind_tbl = fdq->ind_table,
			.pd = priv->pd
		});
	if (!fdq->qp) {
		WARN("cannot allocate QP for drop queue");
		goto error;
	}
	priv->flow_drop_queue = fdq;
	return 0;
error:
	/* Release whatever was created, in reverse order. */
	if (fdq) {
		if (fdq->qp)
			claim_zero(ibv_destroy_qp(fdq->qp));
		if (fdq->ind_table)
			claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
		if (fdq->wq)
			claim_zero(ibv_destroy_wq(fdq->wq));
		if (fdq->cq)
			claim_zero(ibv_destroy_cq(fdq->cq));
		rte_free(fdq);
	}
	priv->flow_drop_queue = NULL;
	return -1;
}
/**
 * Delete drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_flow_delete_drop_queue(struct priv *priv)
{
	struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;

	if (!fdq)
		return;
	if (fdq->qp)
		claim_zero(ibv_destroy_qp(fdq->qp));
	if (fdq->ind_table)
		claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
	if (fdq->wq)
		claim_zero(ibv_destroy_wq(fdq->wq));
	if (fdq->cq)
		claim_zero(ibv_destroy_cq(fdq->cq));
	rte_free(fdq);
	priv->flow_drop_queue = NULL;
}
/**
 * Remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
		assert(!flow->drop);
		claim_zero(ibv_destroy_flow(flow->frxq.ibv_flow));
		flow->frxq.ibv_flow = NULL;
		/* Clear the mark before the indirection table goes away. */
		if (flow->mark) {
			struct mlx5_ind_table_ibv *ind_tbl =
				flow->frxq.hrxq->ind_table;
			unsigned int n;

			for (n = 0; n < ind_tbl->queues_n; ++n)
				(*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
		}
		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
		flow->frxq.hrxq = NULL;
		DEBUG("Flow %p removed", (void *)flow);
	}
}
/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH(flow, list, next) {
		if (flow->frxq.hrxq)
			goto flow_create;
		flow->frxq.hrxq =
			mlx5_priv_hrxq_get(priv, rss_hash_default_key,
					   rss_hash_default_key_len,
					   flow->frxq.hash_fields,
					   (*flow->queues),
					   flow->queues_n);
		if (flow->frxq.hrxq)
			goto flow_create;
		flow->frxq.hrxq =
			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					   rss_hash_default_key_len,
					   flow->frxq.hash_fields,
					   (*flow->queues),
					   flow->queues_n);
		if (!flow->frxq.hrxq) {
			DEBUG("Flow %p cannot be applied",
			      (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
flow_create:
		flow->frxq.ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
						      flow->frxq.ibv_attr);
		if (!flow->frxq.ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
		DEBUG("Flow %p applied", (void *)flow);
		if (flow->mark) {
			unsigned int n;

			for (n = 0;
			     n < flow->frxq.hrxq->ind_table->queues_n;
			     ++n) {
				uint16_t idx =
					flow->frxq.hrxq->ind_table->queues[n];
				(*priv->rxqs)[idx]->mark = 1;
			}
		}
	}
	return 0;
}
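/*
 * Note (illustrative): priv_flow_stop() destroys the Verbs flow and
 * releases the hash Rx queue but keeps ibv_attr, so priv_flow_start() can
 * re-acquire (or re-create) an equivalent hash Rx queue and re-attach every
 * rule across a port restart without re-parsing.
 */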
/**
 * Verify the flow list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of flows not released.
 */
int
priv_flow_verify(struct priv *priv)
{
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DEBUG("%p: flow %p still referenced", (void *)priv,
		      (void *)flow);
		++ret;
	}
	return ret;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = MLX5_CTRL_FLOW_PRIORITY,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &(struct rte_flow_action_queue){
				.index = 0,
			},
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;

	flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
				&error);
	if (!flow)
		return rte_errno;
	return 0;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
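/*
 * Example (illustrative): the port start sequence can use these helpers to
 * install control rules such as a broadcast match, e.g.
 *
 *   struct rte_flow_item_eth bcast = {
 *       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *   };
 *   mlx5_ctrl_flow(dev, &bcast, &bcast);
 *
 * which ends up as a QUEUE action at MLX5_CTRL_FLOW_PRIORITY on the control
 * flow list.
 */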
/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_destroy(priv, &priv->flows, flow);
	priv_unlock(priv);
	return 0;
}
/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_flush(priv, &priv->flows);
	priv_unlock(priv);
	return 0;
}
/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		priv_unlock(priv);
		return -rte_errno;
	}
	priv->isolated = !!enable;
	priv_unlock(priv);
	return 0;
}