/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4

static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
/** Structure for Drop queue. */
struct mlx5_hrxq_drop {
	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_wq *wq; /**< Verbs work queue. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
};
/* Flows structures. */
struct mlx5_flow {
	uint64_t hash_fields; /**< Fields that participate in the hash. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
};

/* Drop flows structures. */
struct mlx5_flow_drop {
	struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
};

struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	uint32_t mark:1; /**< Set if the flow is marked. */
	uint32_t drop:1; /**< Drop queue. */
	struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
	struct ibv_flow *ibv_flow; /**< Verbs flow. */
	uint16_t queues_n; /**< Number of entries in queues[]. */
	uint16_t (*queues)[]; /**< Queue indexes to use. */
	union {
		struct mlx5_flow frxq; /**< Flow with Rx queue. */
		struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
	};
};
/** Static initializer for items. */
#define ITEMS(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
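
/*
 * Illustrative note (not in the original sources): ITEMS(a, b) expands to a
 * compound literal such as:
 *
 *   (const enum rte_flow_item_type []){
 *           a, b, RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * so every .items list below is implicitly terminated by
 * RTE_FLOW_ITEM_TYPE_END.
 */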
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/**
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Conversion function from rte_flow to NIC specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
};
/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_MARK,
	RTE_FLOW_ACTION_TYPE_FLAG,
	RTE_FLOW_ACTION_TYPE_END,
};
/** Graph of supported items and associated actions. */
static const struct mlx5_flow_items mlx5_flow_items[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
			       RTE_FLOW_ITEM_TYPE_VXLAN),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.type = -1,
		},
		.default_mask = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.convert = mlx5_flow_create_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vlan){
			.tci = -1,
		},
		.default_mask = &rte_flow_item_vlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.convert = mlx5_flow_create_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = -1,
				.dst_addr = -1,
				.type_of_service = -1,
				.next_proto_id = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.convert = mlx5_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.src_addr = {
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
				},
				.dst_addr = {
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
				},
				.vtc_flow = -1,
				.proto = -1,
				.hop_limits = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv6_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.convert = mlx5_flow_create_ipv6,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv6),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.convert = mlx5_flow_create_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.convert = mlx5_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vxlan){
			.vni = "\xff\xff\xff",
		},
		.default_mask = &rte_flow_item_vxlan_mask,
		.mask_sz = sizeof(struct rte_flow_item_vxlan),
		.convert = mlx5_flow_create_vxlan,
		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
	},
};
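
/*
 * Illustrative sketch (not part of the driver): the graph above is walked
 * from the RTE_FLOW_ITEM_TYPE_END entry, each item being accepted only if
 * it appears in the .items list of its predecessor. A hypothetical pattern
 * such as the one below therefore validates, the trailing eth item matching
 * the inner Ethernet header of the VXLAN payload:
 *
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */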
/** Structure to pass to the conversion function. */
struct mlx5_flow_parse {
	struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
	uint32_t inner; /**< Set once VXLAN is encountered. */
	uint32_t create:1; /**< Leave allocated resources on exit. */
	uint32_t queue:1; /**< Target is a receive queue. */
	uint32_t drop:1; /**< Target is a drop queue. */
	uint32_t mark:1; /**< Mark is present in the flow. */
	uint32_t mark_id; /**< Mark identifier. */
	uint64_t hash_fields; /**< Fields that participate in the hash. */
	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indexes to use. */
	uint16_t queues_n; /**< Number of entries in queues[]. */
};
static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
};
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	if (filter_type == RTE_ETH_FILTER_GENERIC) {
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &mlx5_flow_ops;
		return 0;
	}
	ERROR("%p: filter type (%d) not supported",
	      (void *)dev, filter_type);
	return -EINVAL;
}
/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success.
 */
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	int ret = 0;

	if (!item->spec && (item->mask || item->last))
		return -1;
	if (item->spec && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->spec;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->last && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->last;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->mask) {
		unsigned int i;
		const uint8_t *spec = item->mask;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		const uint8_t *apply = mask;
		unsigned int i;
		int ret;

		if (item->mask)
			apply = item->mask;
		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
		}
		ret = memcmp(spec, last, size);
		return ret;
	}
	return ret;
}
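
/*
 * Illustrative example (assumption, not from the original sources): for the
 * Ethernet item, whose supported bit-mask sets dst, src and type to all
 * ones, a user mask of { .type = 0xffff } passes the
 * (spec[i] | mask[i]) != mask[i] check, while a mask requesting any bit the
 * NIC cannot match on makes the function return -1. The final memcmp()
 * rejects spec/last ranges, which this PMD does not support.
 */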
/**
 * Validate and convert a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_convert(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct mlx5_flow_parse *parser)
{
	const struct mlx5_flow_items *cur_item = mlx5_flow_items;

	*parser = (struct mlx5_flow_parse){
		.ibv_attr = parser->ibv_attr,
		.create = parser->create,
		.offset = sizeof(struct ibv_flow_attr),
		.mark_id = MLX5_FLOW_MARK_DEFAULT,
	};
	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			parser->drop = 1;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;
			uint16_t n;
			uint16_t found = 0;

			if (!queue || (queue->index > (priv->rxqs_n - 1)))
				goto exit_action_not_supported;
			for (n = 0; n < parser->queues_n; ++n) {
				if (parser->queues[n] == queue->index) {
					found = 1;
					break;
				}
			}
			if (parser->queues_n > 1 && !found) {
				rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions,
					   "queue action not in RSS queues");
				return -rte_errno;
			}
			if (!found) {
				parser->queue = 1;
				parser->queues_n = 1;
				parser->queues[0] = queue->index;
			}
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
			const struct rte_flow_action_rss *rss =
				(const struct rte_flow_action_rss *)
				actions->conf;
			uint16_t n;

			if (!rss || !rss->num) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "no valid queues");
				return -rte_errno;
			}
			if (parser->queues_n == 1) {
				uint16_t found = 0;

				assert(parser->queues_n);
				for (n = 0; n < rss->num; ++n) {
					if (parser->queues[0] ==
					    rss->queue[n]) {
						found = 1;
						break;
					}
				}
				if (!found) {
					rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "queue action not in RSS"
						   " queues");
					return -rte_errno;
				}
			}
			for (n = 0; n < rss->num; ++n) {
				if (rss->queue[n] >= priv->rxqs_n) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "queue id > number of"
						   " queues");
					return -rte_errno;
				}
			}
			parser->queue = 1;
			for (n = 0; n < rss->num; ++n)
				parser->queues[n] = rss->queue[n];
			parser->queues_n = rss->num;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
			const struct rte_flow_action_mark *mark =
				(const struct rte_flow_action_mark *)
				actions->conf;

			if (!mark) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "mark must be defined");
				return -rte_errno;
			} else if (mark->id >= MLX5_FLOW_MARK_MAX) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "mark must be between 0"
						   " and 16777199");
				return -rte_errno;
			}
			parser->mark = 1;
			parser->mark_id = mark->id;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
			parser->mark = 1;
		} else {
			goto exit_action_not_supported;
		}
	}
	if (parser->mark && !parser->ibv_attr && !parser->drop)
		parser->offset += sizeof(struct ibv_flow_spec_action_tag);
	if (!parser->ibv_attr && parser->drop)
		parser->offset += sizeof(struct ibv_flow_spec_action_drop);
	if (!parser->queue && !parser->drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		const struct mlx5_flow_items *token = NULL;
		unsigned int i;
		int err;

		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		for (i = 0;
		     cur_item->items &&
		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
		     ++i) {
			if (cur_item->items[i] == items->type) {
				token = &mlx5_flow_items[items->type];
				break;
			}
		}
		if (!token)
			goto exit_item_not_supported;
		cur_item = token;
		err = mlx5_flow_item_validate(items,
					      (const uint8_t *)cur_item->mask,
					      cur_item->mask_sz);
		if (err)
			goto exit_item_not_supported;
		if (parser->ibv_attr && cur_item->convert) {
			err = cur_item->convert(items,
						(cur_item->default_mask ?
						 cur_item->default_mask :
						 cur_item->mask),
						parser);
			if (err)
				goto exit_item_not_supported;
		} else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
			if (parser->inner) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   items,
						   "cannot recognize multiple"
						   " VXLAN encapsulations");
				return -rte_errno;
			}
			parser->inner = IBV_FLOW_SPEC_INNER;
		}
		parser->offset += cur_item->dst_sz;
	}
	return 0;
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   items, "item not supported");
	return -rte_errno;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
	return -rte_errno;
}
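
/*
 * Usage sketch (based on the callers below): this function is meant to run
 * twice. A first pass with parser->ibv_attr == NULL only validates the rule
 * and accumulates the required specification size in parser->offset; the
 * caller then allocates parser->ibv_attr accordingly and a second pass
 * fills it through the per-item convert() callbacks.
 */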
/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
	unsigned int i;

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields = 0;
	eth = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*eth = (struct ibv_flow_spec_eth) {
		.type = parser->inner | IBV_FLOW_SPEC_ETH,
		.size = eth_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	eth->val.ether_type = spec->type;
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	eth->mask.ether_type = mask->type;
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
	}
	eth->val.ether_type &= eth->mask.ether_type;
	return 0;
}
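
/*
 * Layout note (illustrative): each convert() callback appends its Verbs
 * specification right after the ones already written, e.g. for an
 * eth / ipv4 / udp pattern the ibv_attr buffer ends up as:
 *
 *   struct ibv_flow_attr | ibv_flow_spec_eth | ibv_flow_spec_ipv4_ext |
 *   ibv_flow_spec_tcp_udp
 *
 * parser->offset always points one past the last specification written,
 * being advanced by dst_sz in the priv_flow_convert() item loop.
 */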
/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	/* VLAN is folded into the Ethernet specification written just before. */
	eth = (void *)((uintptr_t)parser->ibv_attr + parser->offset - eth_size);
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
}
/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_ipv4_ext *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields = (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4);
	ipv4 = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*ipv4 = (struct ibv_flow_spec_ipv4_ext) {
		.type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
		.size = ipv4_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	ipv4->val = (struct ibv_flow_ipv4_ext_filter){
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
		.proto = spec->hdr.next_proto_id,
		.tos = spec->hdr.type_of_service,
	};
	ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
		.proto = mask->hdr.next_proto_id,
		.tos = mask->hdr.type_of_service,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	ipv4->val.proto &= ipv4->mask.proto;
	ipv4->val.tos &= ipv4->mask.tos;
	return 0;
}
/**
 * Convert IPv6 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_ipv6 *ipv6;
	unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
	unsigned int i;

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields = (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6);
	ipv6 = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*ipv6 = (struct ibv_flow_spec_ipv6) {
		.type = parser->inner | IBV_FLOW_SPEC_IPV6,
		.size = ipv6_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
	       RTE_DIM(ipv6->val.src_ip));
	memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
	       RTE_DIM(ipv6->val.dst_ip));
	memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
	       RTE_DIM(ipv6->mask.src_ip));
	memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
	       RTE_DIM(ipv6->mask.dst_ip));
	ipv6->val.flow_label = spec->hdr.vtc_flow;
	ipv6->val.next_hdr = spec->hdr.proto;
	ipv6->val.hop_limit = spec->hdr.hop_limits;
	ipv6->mask.flow_label = mask->hdr.vtc_flow;
	ipv6->mask.next_hdr = mask->hdr.proto;
	ipv6->mask.hop_limit = mask->hdr.hop_limits;
	/* Remove unwanted bits from values. */
	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
	}
	ipv6->val.flow_label &= ipv6->mask.flow_label;
	ipv6->val.next_hdr &= ipv6->mask.next_hdr;
	ipv6->val.hop_limit &= ipv6->mask.hop_limit;
	return 0;
}
/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
				IBV_RX_HASH_DST_PORT_UDP);
	udp = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = parser->inner | IBV_FLOW_SPEC_UDP,
		.size = udp_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
}
/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++parser->ibv_attr->num_of_specs;
	parser->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
				IBV_RX_HASH_DST_PORT_TCP);
	tcp = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = parser->inner | IBV_FLOW_SPEC_TCP,
		.size = tcp_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
}
/**
 * Convert VXLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
	struct ibv_flow_spec_tunnel *vxlan;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id;

	++parser->ibv_attr->num_of_specs;
	id.vni[0] = 0;
	vxlan = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*vxlan = (struct ibv_flow_spec_tunnel) {
		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	parser->inner = IBV_FLOW_SPEC_INNER;
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(&id.vni[1], spec->vni, 3);
	vxlan->val.tunnel_id = id.vlan_id;
	memcpy(&id.vni[1], mask->vni, 3);
	vxlan->mask.tunnel_id = id.vlan_id;
	/* Remove unwanted bits from values. */
	vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
	return 0;
}
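
/*
 * Worked example (illustrative): the 24-bit VNI is copied into vni[1..3]
 * of the union with vni[0] forced to 0, so tunnel_id carries the VNI in
 * network order within a 32-bit word. A VNI of 0x123456 (bytes 0x12 0x34
 * 0x56 in the item) thus yields id.vni[] = { 0x00, 0x12, 0x34, 0x56 }.
 */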
/**
 * Convert mark/flag action to Verbs specification.
 *
 * @param parser
 *   Internal parser structure.
 * @param mark_id
 *   Mark identifier.
 */
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
{
	struct ibv_flow_spec_action_tag *tag;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);

	assert(parser->mark);
	tag = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*tag = (struct ibv_flow_spec_action_tag){
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark_id),
	};
	++parser->ibv_attr->num_of_specs;
	parser->offset += size;
	return 0;
}
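
/*
 * Note (assumption): mlx5_flow_mark_set() translates the user mark ID into
 * the value carried in the completion queue entry, which the Rx burst
 * functions translate back before exposing it in mbuf->hash.fdir.hi, so a
 * flow created with MARK id N is seen again as N by the application.
 */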
/**
 * Complete flow rule creation with a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param parser
 *   Internal parser structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue_drop(struct priv *priv,
				   struct mlx5_flow_parse *parser,
				   struct rte_flow_error *error)
{
	struct rte_flow *rte_flow;
	struct ibv_flow_spec_action_drop *drop;
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);

	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	rte_flow->drop = 1;
	drop = (void *)((uintptr_t)parser->ibv_attr + parser->offset);
	*drop = (struct ibv_flow_spec_action_drop){
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};
	++parser->ibv_attr->num_of_specs;
	parser->offset += sizeof(struct ibv_flow_spec_action_drop);
	rte_flow->ibv_attr = parser->ibv_attr;
	if (!priv->dev->data->dev_started)
		return rte_flow;
	rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
	rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
					     rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	assert(rte_flow);
	rte_free(rte_flow);
	return NULL;
}
/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param parser
 *   MLX5 flow parser attributes (filled by mlx5_flow_validate()).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
			      struct mlx5_flow_parse *parser,
			      struct rte_flow_error *error)
{
	struct rte_flow *rte_flow;
	unsigned int i;

	assert(!parser->drop);
	rte_flow = rte_calloc(__func__, 1,
			      sizeof(*rte_flow) +
			      parser->queues_n * sizeof(uint16_t),
			      0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	rte_flow->mark = parser->mark;
	rte_flow->ibv_attr = parser->ibv_attr;
	rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
	memcpy(rte_flow->queues, parser->queues,
	       parser->queues_n * sizeof(uint16_t));
	rte_flow->queues_n = parser->queues_n;
	rte_flow->frxq.hash_fields = parser->hash_fields;
	rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
						 rss_hash_default_key_len,
						 parser->hash_fields,
						 (*rte_flow->queues),
						 rte_flow->queues_n);
	if (!rte_flow->frxq.hrxq) {
		rte_flow->frxq.hrxq =
			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					   rss_hash_default_key_len,
					   parser->hash_fields,
					   (*rte_flow->queues),
					   rte_flow->queues_n);
		if (!rte_flow->frxq.hrxq) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "cannot create hash rxq");
			goto error;
		}
	}
	for (i = 0; i != parser->queues_n; ++i) {
		struct mlx5_rxq_data *q =
			(*priv->rxqs)[parser->queues[i]];

		q->mark |= parser->mark;
	}
	if (!priv->dev->data->dev_started)
		return rte_flow;
	rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
					     rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	assert(rte_flow);
	if (rte_flow->frxq.hrxq)
		mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
	rte_free(rte_flow);
	return NULL;
}
/**
 * Validate a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in,out] parser
 *   MLX5 parser structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
priv_flow_validate(struct priv *priv,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error,
		   struct mlx5_flow_parse *parser)
{
	int err;

	/* First pass: validate and compute the specification size. */
	err = priv_flow_convert(priv, attr, items, actions, error, parser);
	if (err)
		goto exit;
	if (parser->mark)
		parser->offset += sizeof(struct ibv_flow_spec_action_tag);
	parser->ibv_attr = rte_malloc(__func__, parser->offset, 0);
	if (!parser->ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
		err = rte_errno;
		goto exit;
	}
	*parser->ibv_attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_flow_attr),
		.priority = attr->priority,
		.num_of_specs = 0,
		.port = 0,
		.flags = 0,
	};
	/* Second pass: fill the specifications. */
	err = priv_flow_convert(priv, attr, items, actions, error, parser);
	if (err)
		goto exit;
	if (parser->mark)
		mlx5_flow_create_flag_mark(parser, parser->mark_id);
	if (parser->create)
		return 0;
exit:
	if (parser->ibv_attr) {
		rte_free(parser->ibv_attr);
		parser->ibv_attr = NULL;
	}
	return err;
}
/**
 * Create a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise.
 */
static struct rte_flow *
priv_flow_create(struct priv *priv,
		 struct mlx5_flows *list,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mlx5_flow_parse parser = { .create = 1, };
	struct rte_flow *flow;
	int err;

	err = priv_flow_validate(priv, attr, items, actions, error, &parser);
	if (err)
		goto exit;
	if (parser.drop)
		flow = priv_flow_create_action_queue_drop(priv, &parser,
							  error);
	else
		flow = priv_flow_create_action_queue(priv, &parser, error);
	if (!flow)
		goto exit;
	TAILQ_INSERT_TAIL(list, flow, next);
	DEBUG("Flow created %p", (void *)flow);
	return flow;
exit:
	if (parser.ibv_attr)
		rte_free(parser.ibv_attr);
	return NULL;
}
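
/*
 * Illustrative use (not part of the driver): a hypothetical rule steering
 * TCP traffic to queue 3 would reach this function, through
 * mlx5_flow_create(), roughly as:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *             .conf = &(struct rte_flow_action_queue){ .index = 3 } },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */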
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int ret;
	struct mlx5_flow_parse parser = { .create = 0, };

	priv_lock(priv);
	ret = priv_flow_validate(priv, attr, items, actions, error, &parser);
	priv_unlock(priv);
	return ret;
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;

	priv_lock(priv);
	flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
				error);
	priv_unlock(priv);
	return flow;
}
/**
 * Destroy a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
priv_flow_destroy(struct priv *priv,
		  struct mlx5_flows *list,
		  struct rte_flow *flow)
{
	unsigned int i;
	uint16_t *queues;
	uint16_t queues_n;

	if (flow->drop || !flow->mark)
		goto out;
	queues = flow->frxq.hrxq->ind_table->queues;
	queues_n = flow->frxq.hrxq->ind_table->queues_n;
	for (i = 0; i != queues_n; ++i) {
		struct rte_flow *tmp;
		struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
		int mark = 0;

		/*
		 * To remove the mark from the queue, the queue must not be
		 * present in any other marked flow (RSS or not).
		 */
		TAILQ_FOREACH(tmp, list, next) {
			unsigned int j;

			if (!tmp->mark)
				continue;
			for (j = 0;
			     (j != tmp->frxq.hrxq->ind_table->queues_n) &&
			     !mark;
			     j++)
				if (tmp->frxq.hrxq->ind_table->queues[j] ==
				    queues[i])
					mark = 1;
		}
		rxq_data->mark = mark;
	}
out:
	if (flow->ibv_flow)
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
	if (!flow->drop)
		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
	TAILQ_REMOVE(list, flow, next);
	rte_free(flow->ibv_attr);
	DEBUG("Flow destroyed %p", (void *)flow);
	rte_free(flow);
}
/**
 * Destroy all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		priv_flow_destroy(priv, list, flow);
	}
}
/**
 * Create drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success.
 */
int
priv_flow_create_drop_queue(struct priv *priv)
{
	struct mlx5_hrxq_drop *fdq = NULL;

	assert(priv->pd);
	assert(priv->ctx);
	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
	if (!fdq) {
		WARN("cannot allocate memory for drop queue");
		return -1;
	}
	fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!fdq->cq) {
		WARN("cannot allocate CQ for drop queue");
		goto error;
	}
	fdq->wq = ibv_create_wq(priv->ctx,
			&(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = priv->pd,
			.cq = fdq->cq,
			});
	if (!fdq->wq) {
		WARN("cannot allocate WQ for drop queue");
		goto error;
	}
	fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
			&(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &fdq->wq,
			.comp_mask = 0,
			});
	if (!fdq->ind_table) {
		WARN("cannot allocate indirection table for drop queue");
		goto error;
	}
	fdq->qp = ibv_create_qp_ex(priv->ctx,
		&(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = rss_hash_default_key_len,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
				},
			.rwq_ind_tbl = fdq->ind_table,
			.pd = priv->pd
		});
	if (!fdq->qp) {
		WARN("cannot allocate QP for drop queue");
		goto error;
	}
	priv->flow_drop_queue = fdq;
	return 0;
error:
	if (fdq->qp)
		claim_zero(ibv_destroy_qp(fdq->qp));
	if (fdq->ind_table)
		claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
	if (fdq->wq)
		claim_zero(ibv_destroy_wq(fdq->wq));
	if (fdq->cq)
		claim_zero(ibv_destroy_cq(fdq->cq));
	rte_free(fdq);
	priv->flow_drop_queue = NULL;
	return -1;
}
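
/*
 * Design note (illustrative): the drop queue mirrors a regular hash Rx
 * queue. A CQ backs a one-entry WQ, a single-entry indirection table wraps
 * that WQ, and an RSS-capable QP with a zero rx_hash_fields_mask points at
 * the table, so drop rules can be attached with ibv_create_flow() like any
 * other rule while the WQ is never given receive buffers.
 */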
/**
 * Delete drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_flow_delete_drop_queue(struct priv *priv)
{
	struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;

	if (!fdq)
		return;
	if (fdq->qp)
		claim_zero(ibv_destroy_qp(fdq->qp));
	if (fdq->ind_table)
		claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
	if (fdq->wq)
		claim_zero(ibv_destroy_wq(fdq->wq));
	if (fdq->cq)
		claim_zero(ibv_destroy_cq(fdq->cq));
	rte_free(fdq);
	priv->flow_drop_queue = NULL;
}
/**
 * Remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		/* Clear the mark before the hash Rx queue is released. */
		if (flow->mark) {
			struct mlx5_ind_table_ibv *ind_tbl =
				flow->frxq.hrxq->ind_table;
			unsigned int n;

			for (n = 0; n < ind_tbl->queues_n; ++n)
				(*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
		}
		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
		flow->frxq.hrxq = NULL;
		DEBUG("Flow %p removed", (void *)flow);
	}
}
/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
	struct rte_flow *flow;

	TAILQ_FOREACH(flow, list, next) {
		if (flow->frxq.hrxq)
			goto flow_create;
		flow->frxq.hrxq =
			mlx5_priv_hrxq_get(priv, rss_hash_default_key,
					   rss_hash_default_key_len,
					   flow->frxq.hash_fields,
					   (*flow->queues),
					   flow->queues_n);
		if (flow->frxq.hrxq)
			goto flow_create;
		flow->frxq.hrxq =
			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					   rss_hash_default_key_len,
					   flow->frxq.hash_fields,
					   (*flow->queues),
					   flow->queues_n);
		if (!flow->frxq.hrxq) {
			DEBUG("Flow %p cannot be applied",
			      (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
flow_create:
		flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
						 flow->ibv_attr);
		if (!flow->ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
		DEBUG("Flow %p applied", (void *)flow);
		if (flow->mark) {
			unsigned int n;

			for (n = 0;
			     n < flow->frxq.hrxq->ind_table->queues_n;
			     ++n) {
				uint16_t idx =
					flow->frxq.hrxq->ind_table->queues[n];
				(*priv->rxqs)[idx]->mark = 1;
			}
		}
	}
	return 0;
}
/**
 * Verify the flow list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of flows not released.
 */
int
priv_flow_verify(struct priv *priv)
{
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DEBUG("%p: flow %p still referenced", (void *)priv,
		      (void *)flow);
		++ret;
	}
	return ret;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = MLX5_CTRL_FLOW_PRIORITY,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &(struct rte_flow_action_queue){
				.index = 0,
			},
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;

	flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
				&error);
	if (!flow)
		return rte_errno;
	return 0;
}
/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
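
/*
 * Usage sketch (illustrative, not from the original sources): enabling
 * reception of broadcast frames from the control path could look like:
 *
 *   struct rte_flow_item_eth bcast = {
 *           .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *   };
 *
 *   claim_zero(mlx5_ctrl_flow(dev, &bcast, &bcast));
 *
 * using the spec itself as mask, so only the broadcast address is matched.
 */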
/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_destroy(priv, &priv->flows, flow);
	priv_unlock(priv);
	return 0;
}
/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_flush(priv, &priv->flows);
	priv_unlock(priv);
	return 0;
}
/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		priv_unlock(priv);
		return -rte_errno;
	}
	priv->isolated = !!enable;
	priv_unlock(priv);
	return 0;
}
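
/*
 * Note (assumption): once isolated mode is set, only flows explicitly
 * created through rte_flow should reach the application; the flag is
 * consulted by the traffic enabling code to skip the default control
 * flows when the port is started.
 */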