4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
38 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
40 #pragma GCC diagnostic ignored "-Wpedantic"
42 #include <infiniband/verbs.h>
44 #pragma GCC diagnostic error "-Wpedantic"
47 #include <rte_ethdev.h>
49 #include <rte_flow_driver.h>
50 #include <rte_malloc.h>
/*
 * NOTE(review): this excerpt is a lossy sample of the original file; the
 * return types and trailing "void *data" parameter of the prototypes below
 * are among the lines that are missing.  Code is left byte-identical.
 */
55 /* Define minimal priority for control plane flows. */
56 #define MLX5_CTRL_FLOW_PRIORITY 4
/*
 * Forward declarations of the per-item conversion callbacks referenced by
 * the mlx5_flow_items[] graph further down (one per supported pattern
 * item: ETH, VLAN, IPv4, IPv6, UDP, TCP, VXLAN).
 */
59 mlx5_flow_create_eth(const struct rte_flow_item *item,
60 const void *default_mask,
64 mlx5_flow_create_vlan(const struct rte_flow_item *item,
65 const void *default_mask,
69 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
70 const void *default_mask,
74 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
75 const void *default_mask,
79 mlx5_flow_create_udp(const struct rte_flow_item *item,
80 const void *default_mask,
84 mlx5_flow_create_tcp(const struct rte_flow_item *item,
85 const void *default_mask,
89 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
90 const void *default_mask,
/*
 * NOTE(review): the closing braces of these structs and the opening lines
 * of "struct mlx5_flow" and "struct rte_flow" are missing from this
 * excerpt; fields are left byte-identical.
 */
93 /** Structure for Drop queue. */
94 struct mlx5_hrxq_drop {
95 struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
96 struct ibv_qp *qp; /**< Verbs queue pair. */
97 struct ibv_wq *wq; /**< Verbs work queue. */
98 struct ibv_cq *cq; /**< Verbs completion queue. */
/* Fields of "struct mlx5_flow" (opening line not visible here). */
101 /* Flows structures. */
103 uint64_t hash_fields; /**< Fields that participate in the hash. */
104 struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
107 /* Drop flows structures. */
108 struct mlx5_flow_drop {
109 struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
/* Fields of "struct rte_flow" (opening line not visible here). */
113 TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
114 uint32_t mark:1; /**< Set if the flow is marked. */
115 uint32_t drop:1; /**< Drop queue. */
116 struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
117 struct ibv_flow *ibv_flow; /**< Verbs flow. */
118 uint16_t queues_n; /**< Number of entries in queue[]. */
119 uint16_t (*queues)[]; /**< Queues indexes to use. */
/* Union-style targets: either a hash Rx queue or the drop queue. */
121 struct mlx5_flow frxq; /**< Flow with Rx queue. */
122 struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
126 /** Static initializer for items. */
/* ITEMS(...) expands to an END-terminated array of item types; the
 * "#define ITEMS(...)" line itself is missing from this excerpt. */
128 (const enum rte_flow_item_type []){ \
129 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
132 /** Structure to generate a simple graph of layers supported by the NIC. */
133 struct mlx5_flow_items {
134 /** List of possible actions for these items. */
135 const enum rte_flow_action_type *const actions;
136 /** Bit-masks corresponding to the possibilities for the item. */
/* NOTE(review): the "const void *mask;" field declaration appears to be
 * among the missing lines; only its doc comment survives below. */
139 * Default bit-masks to use when item->mask is not provided. When
140 * \default_mask is also NULL, the full supported bit-mask (\mask) is
143 const void *default_mask;
144 /** Bit-masks size in bytes. */
145 const unsigned int mask_sz;
147 * Conversion function from rte_flow to NIC specific flow.
150 * rte_flow item to convert.
151 * @param default_mask
152 * Default bit-masks to use when item->mask is not provided.
154 * Internal structure to store the conversion.
157 * 0 on success, negative value otherwise.
159 int (*convert)(const struct rte_flow_item *item,
160 const void *default_mask,
162 /** Size in bytes of the destination structure. */
163 const unsigned int dst_sz;
164 /** List of possible following items. */
165 const enum rte_flow_item_type *const items;
168 /** Valid action for this PMD. */
/* Actions this PMD accepts (RSS is handled separately in the parser). */
169 static const enum rte_flow_action_type valid_actions[] = {
170 RTE_FLOW_ACTION_TYPE_DROP,
171 RTE_FLOW_ACTION_TYPE_QUEUE,
172 RTE_FLOW_ACTION_TYPE_MARK,
173 RTE_FLOW_ACTION_TYPE_FLAG,
174 RTE_FLOW_ACTION_TYPE_END,
177 /** Graph of supported items and associated actions. */
/*
 * Indexed by rte_flow item type.  Each entry lists the items that may
 * legally follow it (.items), the widest supported mask (.mask), the
 * default mask applied when the user provides none (.default_mask), the
 * converter callback (.convert) and the size of the resulting Verbs spec
 * (.dst_sz).  NOTE(review): closing "}," lines of several entries and
 * some mask-field initializers are missing from this excerpt.
 */
178 static const struct mlx5_flow_items mlx5_flow_items[] = {
179 [RTE_FLOW_ITEM_TYPE_END] = {
180 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
181 RTE_FLOW_ITEM_TYPE_VXLAN),
183 [RTE_FLOW_ITEM_TYPE_ETH] = {
184 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
185 RTE_FLOW_ITEM_TYPE_IPV4,
186 RTE_FLOW_ITEM_TYPE_IPV6),
187 .actions = valid_actions,
188 .mask = &(const struct rte_flow_item_eth){
189 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
190 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
193 .default_mask = &rte_flow_item_eth_mask,
194 .mask_sz = sizeof(struct rte_flow_item_eth),
195 .convert = mlx5_flow_create_eth,
196 .dst_sz = sizeof(struct ibv_flow_spec_eth),
198 [RTE_FLOW_ITEM_TYPE_VLAN] = {
199 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
200 RTE_FLOW_ITEM_TYPE_IPV6),
201 .actions = valid_actions,
202 .mask = &(const struct rte_flow_item_vlan){
205 .default_mask = &rte_flow_item_vlan_mask,
206 .mask_sz = sizeof(struct rte_flow_item_vlan),
207 .convert = mlx5_flow_create_vlan,
/* NOTE(review): VLAN entry has no .dst_sz here — VLAN is folded into the
 * preceding ETH spec by mlx5_flow_create_vlan() below. */
210 [RTE_FLOW_ITEM_TYPE_IPV4] = {
211 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
212 RTE_FLOW_ITEM_TYPE_TCP),
213 .actions = valid_actions,
214 .mask = &(const struct rte_flow_item_ipv4){
218 .type_of_service = -1,
222 .default_mask = &rte_flow_item_ipv4_mask,
223 .mask_sz = sizeof(struct rte_flow_item_ipv4),
224 .convert = mlx5_flow_create_ipv4,
225 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
227 [RTE_FLOW_ITEM_TYPE_IPV6] = {
228 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
229 RTE_FLOW_ITEM_TYPE_TCP),
230 .actions = valid_actions,
231 .mask = &(const struct rte_flow_item_ipv6){
/* Full /128 masks for IPv6 source and destination addresses. */
234 0xff, 0xff, 0xff, 0xff,
235 0xff, 0xff, 0xff, 0xff,
236 0xff, 0xff, 0xff, 0xff,
237 0xff, 0xff, 0xff, 0xff,
240 0xff, 0xff, 0xff, 0xff,
241 0xff, 0xff, 0xff, 0xff,
242 0xff, 0xff, 0xff, 0xff,
243 0xff, 0xff, 0xff, 0xff,
250 .default_mask = &rte_flow_item_ipv6_mask,
251 .mask_sz = sizeof(struct rte_flow_item_ipv6),
252 .convert = mlx5_flow_create_ipv6,
253 .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
255 [RTE_FLOW_ITEM_TYPE_UDP] = {
256 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
257 .actions = valid_actions,
258 .mask = &(const struct rte_flow_item_udp){
264 .default_mask = &rte_flow_item_udp_mask,
265 .mask_sz = sizeof(struct rte_flow_item_udp),
266 .convert = mlx5_flow_create_udp,
267 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
269 [RTE_FLOW_ITEM_TYPE_TCP] = {
270 .actions = valid_actions,
271 .mask = &(const struct rte_flow_item_tcp){
277 .default_mask = &rte_flow_item_tcp_mask,
278 .mask_sz = sizeof(struct rte_flow_item_tcp),
279 .convert = mlx5_flow_create_tcp,
280 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
282 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
/* VXLAN restarts the graph at ETH for the inner packet. */
283 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
284 .actions = valid_actions,
285 .mask = &(const struct rte_flow_item_vxlan){
286 .vni = "\xff\xff\xff",
288 .default_mask = &rte_flow_item_vxlan_mask,
289 .mask_sz = sizeof(struct rte_flow_item_vxlan),
290 .convert = mlx5_flow_create_vxlan,
291 .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
295 /** Structure to pass to the conversion function. */
/*
 * Scratch state shared by priv_flow_convert() and the per-item converters:
 * the Verbs attribute buffer under construction, the current write offset
 * into it, and the actions collected from the rule.
 */
296 struct mlx5_flow_parse {
297 struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
298 unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
299 uint32_t inner; /**< Set once VXLAN is encountered. */
300 uint32_t create:1; /**< Leave allocated resources on exit. */
301 uint32_t queue:1; /**< Target is a receive queue. */
302 uint32_t drop:1; /**< Target is a drop queue. */
303 uint32_t mark:1; /**< Mark is present in the flow. */
304 uint32_t mark_id; /**< Mark identifier. */
305 uint64_t hash_fields; /**< Fields that participate in the hash. */
306 uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
307 uint16_t queues_n; /**< Number of entries in queue[]. */
/* rte_flow driver ops exported through mlx5_dev_filter_ctrl(). */
310 static const struct rte_flow_ops mlx5_flow_ops = {
311 .validate = mlx5_flow_validate,
312 .create = mlx5_flow_create,
313 .destroy = mlx5_flow_destroy,
314 .flush = mlx5_flow_flush,
316 .isolate = mlx5_flow_isolate,
/*
 * mlx5_dev_filter_ctrl(): ethdev filter_ctrl hook.  Only the generic
 * (rte_flow) filter type is supported; a GET operation hands back a
 * pointer to mlx5_flow_ops.  NOTE(review): the "arg" parameter line and
 * the return statements are missing from this excerpt.
 */
320 * Manage filter operations.
323 * Pointer to Ethernet device structure.
327 * Operation to perform.
329 * Pointer to operation-specific structure.
332 * 0 on success, negative errno value on failure.
335 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
336 enum rte_filter_type filter_type,
337 enum rte_filter_op filter_op,
342 if (filter_type == RTE_ETH_FILTER_GENERIC) {
343 if (filter_op != RTE_ETH_FILTER_GET)
345 *(const void **)arg = &mlx5_flow_ops;
/* Any other filter type is rejected with a log message. */
348 ERROR("%p: filter type (%d) not supported",
349 (void *)dev, filter_type);
/*
 * mlx5_flow_item_validate(): check that an item's spec, last and mask only
 * use bits covered by the supported bit-mask.  Each byte of spec/last/mask
 * is OR-ed with the supported mask; a changed value means an unsupported
 * bit was requested.  When both spec and last are given, they must be
 * equal after masking (ranges are not supported).
 */
354 * Check support for a given item.
357 * Item specification.
359 * Bit-masks covering supported fields to compare with spec, last and mask in
362 * Bit-Mask size in bytes.
368 mlx5_flow_item_validate(const struct rte_flow_item *item,
369 const uint8_t *mask, unsigned int size)
/* mask/last without a spec is meaningless. */
373 if (!item->spec && (item->mask || item->last))
375 if (item->spec && !item->mask) {
377 const uint8_t *spec = item->spec;
379 for (i = 0; i < size; ++i)
380 if ((spec[i] | mask[i]) != mask[i])
383 if (item->last && !item->mask) {
385 const uint8_t *spec = item->last;
387 for (i = 0; i < size; ++i)
388 if ((spec[i] | mask[i]) != mask[i])
/* User-provided mask must itself fit inside the supported mask. */
393 const uint8_t *spec = item->mask;
395 for (i = 0; i < size; ++i)
396 if ((spec[i] | mask[i]) != mask[i])
399 if (item->spec && item->last) {
402 const uint8_t *apply = mask;
407 for (i = 0; i < size; ++i) {
408 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
409 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
/* Non-zero memcmp means spec..last describes a real range: reject. */
411 ret = memcmp(spec, last, size);
/*
 * priv_flow_convert(): single-pass parser used for both validation and
 * conversion.  On the first call flow->ibv_attr is NULL and only sizes are
 * accumulated in flow->offset; on the second call (buffer allocated by the
 * caller) each item's convert() writes its Verbs spec into the buffer.
 * NOTE(review): many statements (returns, some loop headers, closing
 * braces) are missing from this excerpt; code is left byte-identical.
 */
417 * Validate and convert a flow supported by the NIC.
420 * Pointer to private structure.
422 * Flow rule attributes.
424 * Pattern specification (list terminated by the END pattern item).
426 * Associated actions (list terminated by the END action).
428 * Perform verbose error reporting if not NULL.
429 * @param[in, out] flow
430 * Flow structure to update.
433 * 0 on success, a negative errno value otherwise and rte_errno is set.
436 priv_flow_convert(struct priv *priv,
437 const struct rte_flow_attr *attr,
438 const struct rte_flow_item items[],
439 const struct rte_flow_action actions[],
440 struct rte_flow_error *error,
441 struct mlx5_flow_parse *flow)
443 const struct mlx5_flow_items *cur_item = mlx5_flow_items;
/* Reset parser state, preserving the attr buffer and create flag. */
446 *flow = (struct mlx5_flow_parse){
447 .ibv_attr = flow->ibv_attr,
448 .create = flow->create,
449 .offset = sizeof(struct ibv_flow_attr),
450 .mark_id = MLX5_FLOW_MARK_DEFAULT,
/* Attribute checks: no groups, no custom priorities, ingress only. */
453 rte_flow_error_set(error, ENOTSUP,
454 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
456 "groups are not supported");
459 if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
460 rte_flow_error_set(error, ENOTSUP,
461 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
463 "priorities are not supported");
467 rte_flow_error_set(error, ENOTSUP,
468 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
470 "egress is not supported");
473 if (!attr->ingress) {
474 rte_flow_error_set(error, ENOTSUP,
475 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
477 "only ingress is supported");
/* Walk the action list: DROP, QUEUE, RSS, MARK, FLAG. */
480 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
481 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
483 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
485 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
486 const struct rte_flow_action_queue *queue =
487 (const struct rte_flow_action_queue *)
492 if (!queue || (queue->index > (priv->rxqs_n - 1)))
493 goto exit_action_not_supported;
/* A QUEUE action combined with RSS must target one of its queues. */
494 for (n = 0; n < flow->queues_n; ++n) {
495 if (flow->queues[n] == queue->index) {
500 if (flow->queues_n > 1 && !found) {
501 rte_flow_error_set(error, ENOTSUP,
502 RTE_FLOW_ERROR_TYPE_ACTION,
504 "queue action not in RSS queues");
510 flow->queues[0] = queue->index;
512 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
513 const struct rte_flow_action_rss *rss =
514 (const struct rte_flow_action_rss *)
518 if (!rss || !rss->num) {
519 rte_flow_error_set(error, EINVAL,
520 RTE_FLOW_ERROR_TYPE_ACTION,
/* If a QUEUE action came first, it must appear in the RSS set. */
525 if (flow->queues_n == 1) {
528 assert(flow->queues_n);
529 for (n = 0; n < rss->num; ++n) {
530 if (flow->queues[0] ==
537 rte_flow_error_set(error, ENOTSUP,
538 RTE_FLOW_ERROR_TYPE_ACTION,
540 "queue action not in RSS"
/* Every RSS queue index must be within the configured Rx queues. */
545 for (n = 0; n < rss->num; ++n) {
546 if (rss->queue[n] >= priv->rxqs_n) {
547 rte_flow_error_set(error, EINVAL,
548 RTE_FLOW_ERROR_TYPE_ACTION,
550 "queue id > number of"
556 for (n = 0; n < rss->num; ++n)
557 flow->queues[n] = rss->queue[n];
558 flow->queues_n = rss->num;
559 } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
560 const struct rte_flow_action_mark *mark =
561 (const struct rte_flow_action_mark *)
565 rte_flow_error_set(error, EINVAL,
566 RTE_FLOW_ERROR_TYPE_ACTION,
568 "mark must be defined");
570 } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
571 rte_flow_error_set(error, ENOTSUP,
572 RTE_FLOW_ERROR_TYPE_ACTION,
574 "mark must be between 0"
579 flow->mark_id = mark->id;
580 } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
583 goto exit_action_not_supported;
/* Size pass only: reserve room for the tag/drop action specs. */
586 if (flow->mark && !flow->ibv_attr && !flow->drop)
587 flow->offset += sizeof(struct ibv_flow_spec_action_tag);
588 if (!flow->ibv_attr && flow->drop)
589 flow->offset += sizeof(struct ibv_flow_spec_action_drop);
590 if (!flow->queue && !flow->drop) {
591 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
592 NULL, "no valid action");
/* Walk the pattern, following edges of the mlx5_flow_items graph. */
595 for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
596 const struct mlx5_flow_items *token = NULL;
600 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
604 cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
606 if (cur_item->items[i] == items->type) {
607 token = &mlx5_flow_items[items->type];
612 goto exit_item_not_supported;
614 err = mlx5_flow_item_validate(items,
615 (const uint8_t *)cur_item->mask,
618 goto exit_item_not_supported;
/* Conversion pass: buffer allocated, call the item converter. */
619 if (flow->ibv_attr && cur_item->convert) {
620 err = cur_item->convert(items,
621 (cur_item->default_mask ?
622 cur_item->default_mask :
626 goto exit_item_not_supported;
627 } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
/* Only a single level of VXLAN encapsulation is supported. */
629 rte_flow_error_set(error, ENOTSUP,
630 RTE_FLOW_ERROR_TYPE_ITEM,
632 "cannot recognize multiple"
633 " VXLAN encapsulations");
638 flow->offset += cur_item->dst_sz;
641 exit_item_not_supported:
642 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
643 items, "item not supported");
645 exit_action_not_supported:
646 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
647 actions, "action not supported");
/*
 * mlx5_flow_create_eth(): write an ibv_flow_spec_eth at the parser's
 * current offset from an ETH item.  L2-only flows do not contribute to
 * RSS hashing (hash_fields = 0).  NOTE(review): the early-return when
 * spec is absent and the mask-defaulting line are missing from this
 * excerpt.
 */
652 * Convert Ethernet item to Verbs specification.
655 * Item specification.
656 * @param default_mask[in]
657 * Default bit-masks to use when item->mask is not provided.
658 * @param data[in, out]
662 mlx5_flow_create_eth(const struct rte_flow_item *item,
663 const void *default_mask,
666 const struct rte_flow_item_eth *spec = item->spec;
667 const struct rte_flow_item_eth *mask = item->mask;
668 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
669 struct ibv_flow_spec_eth *eth;
670 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
673 ++flow->ibv_attr->num_of_specs;
674 flow->ibv_attr->priority = 2;
675 flow->hash_fields = 0;
676 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
677 *eth = (struct ibv_flow_spec_eth) {
678 .type = flow->inner | IBV_FLOW_SPEC_ETH,
685 memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
686 memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
687 eth->val.ether_type = spec->type;
688 memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
689 memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
690 eth->mask.ether_type = mask->type;
691 /* Remove unwanted bits from values. */
692 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
693 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
694 eth->val.src_mac[i] &= eth->mask.src_mac[i];
696 eth->val.ether_type &= eth->mask.ether_type;
/*
 * mlx5_flow_create_vlan(): fold a VLAN item's TCI into the ETH spec
 * written by the previous item — note the "- eth_size" offset arithmetic:
 * no new spec is emitted, which matches the missing .dst_sz in the VLAN
 * graph entry.
 */
701 * Convert VLAN item to Verbs specification.
704 * Item specification.
705 * @param default_mask[in]
706 * Default bit-masks to use when item->mask is not provided.
707 * @param data[in, out]
711 mlx5_flow_create_vlan(const struct rte_flow_item *item,
712 const void *default_mask,
715 const struct rte_flow_item_vlan *spec = item->spec;
716 const struct rte_flow_item_vlan *mask = item->mask;
717 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
718 struct ibv_flow_spec_eth *eth;
719 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
/* Point back at the ETH spec already written at offset - eth_size. */
721 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
726 eth->val.vlan_tag = spec->tci;
727 eth->mask.vlan_tag = mask->tci;
728 eth->val.vlan_tag &= eth->mask.vlan_tag;
/*
 * mlx5_flow_create_ipv4(): write an ibv_flow_spec_ipv4_ext at the
 * parser's current offset and enable IPv4 src/dst RSS hashing.
 * NOTE(review): the early-return when spec is absent is among the lines
 * missing from this excerpt.
 */
733 * Convert IPv4 item to Verbs specification.
736 * Item specification.
737 * @param default_mask[in]
738 * Default bit-masks to use when item->mask is not provided.
739 * @param data[in, out]
743 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
744 const void *default_mask,
747 const struct rte_flow_item_ipv4 *spec = item->spec;
748 const struct rte_flow_item_ipv4 *mask = item->mask;
749 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
750 struct ibv_flow_spec_ipv4_ext *ipv4;
751 unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
753 ++flow->ibv_attr->num_of_specs;
754 flow->ibv_attr->priority = 1;
755 flow->hash_fields = (IBV_RX_HASH_SRC_IPV4 |
756 IBV_RX_HASH_DST_IPV4);
757 ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
758 *ipv4 = (struct ibv_flow_spec_ipv4_ext) {
759 .type = flow->inner | IBV_FLOW_SPEC_IPV4_EXT,
766 ipv4->val = (struct ibv_flow_ipv4_ext_filter){
767 .src_ip = spec->hdr.src_addr,
768 .dst_ip = spec->hdr.dst_addr,
769 .proto = spec->hdr.next_proto_id,
770 .tos = spec->hdr.type_of_service,
772 ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
773 .src_ip = mask->hdr.src_addr,
774 .dst_ip = mask->hdr.dst_addr,
775 .proto = mask->hdr.next_proto_id,
776 .tos = mask->hdr.type_of_service,
778 /* Remove unwanted bits from values. */
779 ipv4->val.src_ip &= ipv4->mask.src_ip;
780 ipv4->val.dst_ip &= ipv4->mask.dst_ip;
781 ipv4->val.proto &= ipv4->mask.proto;
782 ipv4->val.tos &= ipv4->mask.tos;
/*
 * mlx5_flow_create_ipv6(): write an ibv_flow_spec_ipv6 at the parser's
 * current offset and enable IPv6 src/dst RSS hashing.  NOTE(review): the
 * early-return when spec is absent and the val.flow_label/next_hdr/
 * hop_limit assignment lines are missing from this excerpt.
 */
787 * Convert IPv6 item to Verbs specification.
790 * Item specification.
791 * @param default_mask[in]
792 * Default bit-masks to use when item->mask is not provided.
793 * @param data[in, out]
797 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
798 const void *default_mask,
801 const struct rte_flow_item_ipv6 *spec = item->spec;
802 const struct rte_flow_item_ipv6 *mask = item->mask;
803 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
804 struct ibv_flow_spec_ipv6 *ipv6;
805 unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
808 ++flow->ibv_attr->num_of_specs;
809 flow->ibv_attr->priority = 1;
810 flow->hash_fields = (IBV_RX_HASH_SRC_IPV6 |
811 IBV_RX_HASH_DST_IPV6);
812 ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
813 *ipv6 = (struct ibv_flow_spec_ipv6) {
814 .type = flow->inner | IBV_FLOW_SPEC_IPV6,
821 memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
822 RTE_DIM(ipv6->val.src_ip));
823 memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
824 RTE_DIM(ipv6->val.dst_ip));
825 memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
826 RTE_DIM(ipv6->mask.src_ip));
827 memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
828 RTE_DIM(ipv6->mask.dst_ip));
829 ipv6->mask.flow_label = mask->hdr.vtc_flow;
830 ipv6->mask.next_hdr = mask->hdr.proto;
831 ipv6->mask.hop_limit = mask->hdr.hop_limits;
832 /* Remove unwanted bits from values. */
833 for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
834 ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
835 ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
837 ipv6->val.flow_label &= ipv6->mask.flow_label;
838 ipv6->val.next_hdr &= ipv6->mask.next_hdr;
839 ipv6->val.hop_limit &= ipv6->mask.hop_limit;
/*
 * mlx5_flow_create_udp(): write an ibv_flow_spec_tcp_udp (UDP type) at
 * the parser's current offset and OR the UDP port fields into the RSS
 * hash selection.
 */
844 * Convert UDP item to Verbs specification.
847 * Item specification.
848 * @param default_mask[in]
849 * Default bit-masks to use when item->mask is not provided.
850 * @param data[in, out]
854 mlx5_flow_create_udp(const struct rte_flow_item *item,
855 const void *default_mask,
858 const struct rte_flow_item_udp *spec = item->spec;
859 const struct rte_flow_item_udp *mask = item->mask;
860 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
861 struct ibv_flow_spec_tcp_udp *udp;
862 unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
864 ++flow->ibv_attr->num_of_specs;
865 flow->ibv_attr->priority = 0;
866 flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
867 IBV_RX_HASH_DST_PORT_UDP);
868 udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
869 *udp = (struct ibv_flow_spec_tcp_udp) {
870 .type = flow->inner | IBV_FLOW_SPEC_UDP,
877 udp->val.dst_port = spec->hdr.dst_port;
878 udp->val.src_port = spec->hdr.src_port;
879 udp->mask.dst_port = mask->hdr.dst_port;
880 udp->mask.src_port = mask->hdr.src_port;
881 /* Remove unwanted bits from values. */
882 udp->val.src_port &= udp->mask.src_port;
883 udp->val.dst_port &= udp->mask.dst_port;
/*
 * mlx5_flow_create_tcp(): write an ibv_flow_spec_tcp_udp (TCP type) at
 * the parser's current offset and OR the TCP port fields into the RSS
 * hash selection.  Mirrors mlx5_flow_create_udp().
 */
888 * Convert TCP item to Verbs specification.
891 * Item specification.
892 * @param default_mask[in]
893 * Default bit-masks to use when item->mask is not provided.
894 * @param data[in, out]
898 mlx5_flow_create_tcp(const struct rte_flow_item *item,
899 const void *default_mask,
902 const struct rte_flow_item_tcp *spec = item->spec;
903 const struct rte_flow_item_tcp *mask = item->mask;
904 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
905 struct ibv_flow_spec_tcp_udp *tcp;
906 unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
908 ++flow->ibv_attr->num_of_specs;
909 flow->ibv_attr->priority = 0;
910 flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
911 IBV_RX_HASH_DST_PORT_TCP);
912 tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
913 *tcp = (struct ibv_flow_spec_tcp_udp) {
914 .type = flow->inner | IBV_FLOW_SPEC_TCP,
921 tcp->val.dst_port = spec->hdr.dst_port;
922 tcp->val.src_port = spec->hdr.src_port;
923 tcp->mask.dst_port = mask->hdr.dst_port;
924 tcp->mask.src_port = mask->hdr.src_port;
925 /* Remove unwanted bits from values. */
926 tcp->val.src_port &= tcp->mask.src_port;
927 tcp->val.dst_port &= tcp->mask.dst_port;
/*
 * mlx5_flow_create_vxlan(): write an ibv_flow_spec_tunnel at the parser's
 * current offset, then set flow->inner so all following items are flagged
 * as inner-packet specs.  The 24-bit VNI is copied into bytes 1..3 of a
 * local union (declaration not visible in this excerpt) to form the
 * 32-bit tunnel_id.
 */
932 * Convert VXLAN item to Verbs specification.
935 * Item specification.
936 * @param default_mask[in]
937 * Default bit-masks to use when item->mask is not provided.
938 * @param data[in, out]
942 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
943 const void *default_mask,
946 const struct rte_flow_item_vxlan *spec = item->spec;
947 const struct rte_flow_item_vxlan *mask = item->mask;
948 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
949 struct ibv_flow_spec_tunnel *vxlan;
950 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
956 ++flow->ibv_attr->num_of_specs;
957 flow->ibv_attr->priority = 0;
959 vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
960 *vxlan = (struct ibv_flow_spec_tunnel) {
961 .type = flow->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
/* Everything after this item belongs to the inner packet. */
964 flow->inner = IBV_FLOW_SPEC_INNER;
969 memcpy(&id.vni[1], spec->vni, 3);
970 vxlan->val.tunnel_id = id.vlan_id;
971 memcpy(&id.vni[1], mask->vni, 3);
972 vxlan->mask.tunnel_id = id.vlan_id;
973 /* Remove unwanted bits from values. */
974 vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
/*
 * mlx5_flow_create_flag_mark(): append an ibv_flow_spec_action_tag
 * carrying mark_id (encoded via mlx5_flow_mark_set()) at the parser's
 * current offset, then advance the offset and spec count.
 */
979 * Convert mark/flag action to Verbs specification.
982 * Pointer to MLX5 flow structure.
987 mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
989 struct ibv_flow_spec_action_tag *tag;
990 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
993 tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
994 *tag = (struct ibv_flow_spec_action_tag){
995 .type = IBV_FLOW_SPEC_ACTION_TAG,
997 .tag_id = mlx5_flow_mark_set(mark_id),
999 ++flow->ibv_attr->num_of_specs;
1000 flow->offset += size;
/*
 * priv_flow_create_action_queue_drop(): allocate the rte_flow object for
 * a drop rule, append the ibv_flow_spec_action_drop, and — only when the
 * device is started — create the Verbs flow on the shared drop queue's QP.
 * NOTE(review): the error-path cleanup and return statements are missing
 * from this excerpt.
 */
1005 * Complete flow rule creation with a drop queue.
1008 * Pointer to private structure.
1010 * MLX5 flow attributes (filled by mlx5_flow_validate()).
1012 * Perform verbose error reporting if not NULL.
1015 * A flow if the rule could be created.
1017 static struct rte_flow *
1018 priv_flow_create_action_queue_drop(struct priv *priv,
1019 struct mlx5_flow_parse *flow,
1020 struct rte_flow_error *error)
1022 struct rte_flow *rte_flow;
1023 struct ibv_flow_spec_action_drop *drop;
1024 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1028 rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
1030 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1031 NULL, "cannot allocate flow memory");
1035 drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
1036 *drop = (struct ibv_flow_spec_action_drop){
1037 .type = IBV_FLOW_SPEC_ACTION_DROP,
1040 ++flow->ibv_attr->num_of_specs;
1041 flow->offset += sizeof(struct ibv_flow_spec_action_drop);
1042 rte_flow->ibv_attr = flow->ibv_attr;
/* Defer Verbs flow creation until the device is started. */
1043 if (!priv->dev->data->dev_started)
1045 rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
1046 rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
1047 rte_flow->ibv_attr);
1048 if (!rte_flow->ibv_flow) {
1049 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1050 NULL, "flow rule creation failure");
/*
 * priv_flow_create_action_queue(): allocate the rte_flow object (with its
 * trailing queue-index array), get or create the matching hash Rx queue,
 * propagate the mark flag to every target Rx queue, and — only when the
 * device is started — create the Verbs flow.  NOTE(review): error-path
 * cleanup and return statements are missing from this excerpt.
 */
1061 * Complete flow rule creation.
1064 * Pointer to private structure.
1066 * MLX5 flow attributes (filled by mlx5_flow_validate()).
1068 * Perform verbose error reporting if not NULL.
1071 * A flow if the rule could be created.
1073 static struct rte_flow *
1074 priv_flow_create_action_queue(struct priv *priv,
1075 struct mlx5_flow_parse *flow,
1076 struct rte_flow_error *error)
1078 struct rte_flow *rte_flow;
1083 assert(!flow->drop);
/* Queue array is allocated in the same block, after the struct. */
1084 rte_flow = rte_calloc(__func__, 1,
1086 flow->queues_n * sizeof(uint16_t),
1089 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1090 NULL, "cannot allocate flow memory");
1093 rte_flow->mark = flow->mark;
1094 rte_flow->ibv_attr = flow->ibv_attr;
1095 rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
1096 memcpy(rte_flow->queues, flow->queues,
1097 flow->queues_n * sizeof(uint16_t));
1098 rte_flow->queues_n = flow->queues_n;
1099 rte_flow->frxq.hash_fields = flow->hash_fields;
/* Reuse an existing hash Rx queue if one matches, else create it. */
1100 rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
1101 rss_hash_default_key_len,
1103 (*rte_flow->queues),
1104 rte_flow->queues_n);
1105 if (!rte_flow->frxq.hrxq) {
1106 rte_flow->frxq.hrxq =
1107 mlx5_priv_hrxq_new(priv, rss_hash_default_key,
1108 rss_hash_default_key_len,
1110 (*rte_flow->queues),
1111 rte_flow->queues_n);
1112 if (!rte_flow->frxq.hrxq) {
1113 rte_flow_error_set(error, ENOMEM,
1114 RTE_FLOW_ERROR_TYPE_HANDLE,
1115 NULL, "cannot create hash rxq");
/* Propagate the mark flag to every Rx queue this flow targets. */
1119 for (i = 0; i != flow->queues_n; ++i) {
1120 struct mlx5_rxq_data *q =
1121 (*priv->rxqs)[flow->queues[i]];
1123 q->mark |= flow->mark;
/* Defer Verbs flow creation until the device is started. */
1125 if (!priv->dev->data->dev_started)
1127 rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
1128 rte_flow->ibv_attr);
1129 if (!rte_flow->ibv_flow) {
1130 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1131 NULL, "flow rule creation failure");
/* Error path: release the hash Rx queue reference taken above. */
1137 if (rte_flow->frxq.hrxq)
1138 mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
/*
 * priv_flow_validate(): two-pass driver around priv_flow_convert() — the
 * first call sizes the Verbs attribute buffer (parser->offset), the
 * buffer is then allocated and initialized, and the second call fills it.
 * The buffer is freed at exit unless parser->create is set (creation
 * path).  NOTE(review): some statements (returns, the mark condition
 * around line 1195) are missing from this excerpt.
 */
1147 * Pointer to private structure.
1149 * Flow rule attributes.
1150 * @param[in] pattern
1151 * Pattern specification (list terminated by the END pattern item).
1152 * @param[in] actions
1153 * Associated actions (list terminated by the END action).
1155 * Perform verbose error reporting if not NULL.
1156 * @param[in,out] parser
1157 * MLX5 parser structure.
1160 * 0 on success, negative errno value on failure.
1163 priv_flow_validate(struct priv *priv,
1164 const struct rte_flow_attr *attr,
1165 const struct rte_flow_item items[],
1166 const struct rte_flow_action actions[],
1167 struct rte_flow_error *error,
1168 struct mlx5_flow_parse *parser)
/* First pass: validation and size accumulation (parser->ibv_attr NULL). */
1172 err = priv_flow_convert(priv, attr, items, actions, error, parser);
1176 parser->offset += sizeof(struct ibv_flow_spec_action_tag);
1177 parser->ibv_attr = rte_malloc(__func__, parser->offset, 0);
1178 if (!parser->ibv_attr) {
1179 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1180 NULL, "cannot allocate ibv_attr memory");
1184 *parser->ibv_attr = (struct ibv_flow_attr){
1185 .type = IBV_FLOW_ATTR_NORMAL,
1186 .size = sizeof(struct ibv_flow_attr),
1187 .priority = attr->priority,
/* Second pass: conversion into the freshly allocated buffer. */
1192 err = priv_flow_convert(priv, attr, items, actions, error, parser);
1193 if (err || parser->create)
1196 mlx5_flow_create_flag_mark(parser, parser->mark_id);
/* Validation-only path: release the buffer before returning. */
1199 if (parser->ibv_attr)
1200 rte_free(parser->ibv_attr);
1208 * Pointer to private structure.
1210 * Pointer to a TAILQ flow list.
1212 * Flow rule attributes.
1213 * @param[in] pattern
1214 * Pattern specification (list terminated by the END pattern item).
1215 * @param[in] actions
1216 * Associated actions (list terminated by the END action).
1218 * Perform verbose error reporting if not NULL.
1221 * A flow on success, NULL otherwise.
1223 static struct rte_flow *
1224 priv_flow_create(struct priv *priv,
1225 struct mlx5_flows *list,
1226 const struct rte_flow_attr *attr,
1227 const struct rte_flow_item items[],
1228 const struct rte_flow_action actions[],
1229 struct rte_flow_error *error)
1231 struct mlx5_flow_parse parser = { .create = 1, };
1232 struct rte_flow *flow;
1235 err = priv_flow_validate(priv, attr, items, actions, error, &parser);
1239 flow = priv_flow_create_action_queue_drop(priv, &parser, error);
1241 flow = priv_flow_create_action_queue(priv, &parser, error);
1244 TAILQ_INSERT_TAIL(list, flow, next);
1245 DEBUG("Flow created %p", (void *)flow);
1248 if (parser.ibv_attr)
1249 rte_free(parser.ibv_attr);
/*
 * mlx5_flow_validate() / mlx5_flow_create(): rte_flow_ops entry points,
 * thin wrappers over priv_flow_validate() and priv_flow_create().
 * NOTE(review): the priv_lock/priv_unlock calls and return statements
 * appear to be among the lines missing from this excerpt.
 */
1254 * Validate a flow supported by the NIC.
1256 * @see rte_flow_validate()
1260 mlx5_flow_validate(struct rte_eth_dev *dev,
1261 const struct rte_flow_attr *attr,
1262 const struct rte_flow_item items[],
1263 const struct rte_flow_action actions[],
1264 struct rte_flow_error *error)
1266 struct priv *priv = dev->data->dev_private;
/* create = 0: validation only, all resources are released on exit. */
1268 struct mlx5_flow_parse parser = { .create = 0, };
1271 ret = priv_flow_validate(priv, attr, items, actions, error, &parser);
1279 * @see rte_flow_create()
1283 mlx5_flow_create(struct rte_eth_dev *dev,
1284 const struct rte_flow_attr *attr,
1285 const struct rte_flow_item items[],
1286 const struct rte_flow_action actions[],
1287 struct rte_flow_error *error)
1289 struct priv *priv = dev->data->dev_private;
1290 struct rte_flow *flow;
/* New flows are appended to the device's main flow list. */
1293 flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
/*
 * priv_flow_destroy(): destroy one flow — clear the mark flag on each Rx
 * queue unless another marked flow in the list still targets it, destroy
 * the Verbs flow, release the hash Rx queue reference, unlink from the
 * list and free the attribute buffer.  NOTE(review): several statements
 * in the mark-scan loop are missing from this excerpt.
 */
1303 * Pointer to private structure.
1305 * Pointer to a TAILQ flow list.
1310 priv_flow_destroy(struct priv *priv,
1311 struct mlx5_flows *list,
1312 struct rte_flow *flow)
/* Drop flows and unmarked flows need no Rx-queue mark bookkeeping. */
1318 if (flow->drop || !flow->mark)
1320 queues = flow->frxq.hrxq->ind_table->queues;
1321 queues_n = flow->frxq.hrxq->ind_table->queues_n;
1322 for (i = 0; i != queues_n; ++i) {
1323 struct rte_flow *tmp;
1324 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
1328 * To remove the mark from the queue, the queue must not be
1329 * present in any other marked flow (RSS or not).
1331 TAILQ_FOREACH(tmp, list, next) {
1337 (j != tmp->frxq.hrxq->ind_table->queues_n) &&
1340 if (tmp->frxq.hrxq->ind_table->queues[j] ==
1344 rxq_data->mark = mark;
/* Tear down Verbs resources and unlink the flow. */
1348 claim_zero(ibv_destroy_flow(flow->ibv_flow));
1350 mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
1351 TAILQ_REMOVE(list, flow, next);
1352 rte_free(flow->ibv_attr);
1353 DEBUG("Flow destroyed %p", (void *)flow);
/**
 * Destroy a flow.
 *
 * Thin rte_flow_ops wrapper around priv_flow_destroy() operating on the
 * device's private flow list.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
	struct priv *priv = dev->data->dev_private;
	priv_flow_destroy(priv, &priv->flows, flow);
/**
 * Destroy all flows on a list.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list; emptied on return.
 */
priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
	/* priv_flow_destroy() unlinks each flow, so the list shrinks. */
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;
		flow = TAILQ_FIRST(list);
		priv_flow_destroy(priv, list, flow);
/**
 * Destroy all flows.
 *
 * Thin rte_flow_ops wrapper around priv_flow_flush() operating on the
 * device's private flow list.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
	struct priv *priv = dev->data->dev_private;
	priv_flow_flush(priv, &priv->flows);
/**
 * Create drop queue.
 *
 * Builds the Verbs objects backing DROP flows, in dependency order:
 * CQ -> WQ -> single-entry indirection table -> RSS QP.  The QP's hash
 * fields mask is 0, so all packets land on the lone (unserviced) WQ and
 * are effectively dropped.  On failure each already-created object is
 * destroyed in reverse order (unwind labels not visible in this view).
 *
 * @param priv
 *   Pointer to private structure.  On success priv->flow_drop_queue is
 *   set; on failure it is left NULL.
 */
priv_flow_create_drop_queue(struct priv *priv)
	struct mlx5_hrxq_drop *fdq = NULL;
	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
	WARN("cannot allocate memory for drop queue");
	/* Minimal CQ: the drop WQ is never polled. */
	fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	WARN("cannot allocate CQ for drop queue");
	fdq->wq = ibv_create_wq(priv->ctx,
			&(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
	WARN("cannot allocate WQ for drop queue");
	/* log_ind_tbl_size = 0: one-entry table pointing at the drop WQ. */
	fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
			&(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &fdq->wq,
	if (!fdq->ind_table) {
		WARN("cannot allocate indirection table for drop queue");
	fdq->qp = ibv_create_qp_ex(priv->ctx,
		&(struct ibv_qp_init_attr_ex){
		.qp_type = IBV_QPT_RAW_PACKET,
			IBV_QP_INIT_ATTR_PD |
			IBV_QP_INIT_ATTR_IND_TABLE |
			IBV_QP_INIT_ATTR_RX_HASH,
		.rx_hash_conf = (struct ibv_rx_hash_conf){
			IBV_RX_HASH_FUNC_TOEPLITZ,
			.rx_hash_key_len = rss_hash_default_key_len,
			.rx_hash_key = rss_hash_default_key,
			/* No hash fields: everything maps to entry 0. */
			.rx_hash_fields_mask = 0,
		.rwq_ind_tbl = fdq->ind_table,
	WARN("cannot allocate QP for drop queue");
	priv->flow_drop_queue = fdq;
	/* Error unwind: destroy in reverse order of creation. */
	claim_zero(ibv_destroy_qp(fdq->qp));
	claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
	claim_zero(ibv_destroy_wq(fdq->wq));
	claim_zero(ibv_destroy_cq(fdq->cq));
	priv->flow_drop_queue = NULL;
/**
 * Delete drop queue.
 *
 * Destroys the drop queue Verbs objects in reverse order of creation
 * (QP, indirection table, WQ, CQ) and clears priv->flow_drop_queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
priv_flow_delete_drop_queue(struct priv *priv)
	struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
	claim_zero(ibv_destroy_qp(fdq->qp));
	claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
	claim_zero(ibv_destroy_wq(fdq->wq));
	claim_zero(ibv_destroy_cq(fdq->cq));
	priv->flow_drop_queue = NULL;
/**
 * Remove all flows from hardware without unlinking them from the list,
 * so priv_flow_start() can re-apply them later.
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
	struct rte_flow *flow;
	/* Reverse order: later (higher-priority-dependent) flows first. */
	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		/*
		 * NOTE(review): BUG — flow->frxq.hrxq is released and set to
		 * NULL here, yet dereferenced just below to reach ind_table
		 * when clearing queue marks.  For marked flows this is a NULL
		 * pointer dereference; the mark-clearing loop must run BEFORE
		 * mlx5_priv_hrxq_release()/the NULL assignment (as fixed
		 * upstream in DPDK).  Not fixed here because the surrounding
		 * elided lines are not visible in this view.
		 */
		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
		flow->frxq.hrxq = NULL;
			struct mlx5_ind_table_ibv *ind_tbl =
				flow->frxq.hrxq->ind_table;
			/* Clear the FLAG/MARK bit on every queue of the flow. */
			for (n = 0; n < ind_tbl->queues_n; ++n)
				(*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
		DEBUG("Flow %p removed", (void *)flow);
/**
 * Re-apply every flow on a list to the hardware (counterpart of
 * priv_flow_stop()).
 *
 * @param priv
 *   Pointer to private structure.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, a errno value otherwise and rte_errno is set.
 */
priv_flow_start(struct priv *priv, struct mlx5_flows *list)
	struct rte_flow *flow;
	TAILQ_FOREACH(flow, list, next) {
		/* Already applied (e.g. drop flows keep their hrxq). */
		if (flow->frxq.hrxq)
		/* Try to reuse an existing hash Rx queue... */
			mlx5_priv_hrxq_get(priv, rss_hash_default_key,
			rss_hash_default_key_len,
			flow->frxq.hash_fields,
		if (flow->frxq.hrxq)
		/* ...otherwise create a fresh one. */
			mlx5_priv_hrxq_new(priv, rss_hash_default_key,
			rss_hash_default_key_len,
			flow->frxq.hash_fields,
		if (!flow->frxq.hrxq) {
			DEBUG("Flow %p cannot be applied",
		/* Attach the Verbs flow to the hash Rx queue's QP. */
		flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
		if (!flow->ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
		DEBUG("Flow %p applied", (void *)flow);
		/* Restore the mark flag on every Rx queue the flow uses. */
			n < flow->frxq.hrxq->ind_table->queues_n;
			flow->frxq.hrxq->ind_table->queues[n];
		(*priv->rxqs)[idx]->mark = 1;
/**
 * Enable/disable isolated mode.
 *
 * Isolated mode restricts Rx traffic to flows explicitly configured
 * through rte_flow.  Can only be toggled while the port is stopped.
 * (The `enable` parameter line is elided in this view — presumably an
 * int between `dev` and `error`; TODO confirm against the prototype.)
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  struct rte_flow_error *error)
	struct priv *priv = dev->data->dev_private;
	/* Refuse to change the mode on a running port. */
	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   "port must be stopped first");
	/* Normalize any non-zero value to 1. */
	priv->isolated = !!enable;
/**
 * Verify the flow list is empty (leak check at close time).
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of flows not released.
 */
priv_flow_verify(struct priv *priv)
	struct rte_flow *flow;
	/* Count (and log) every flow still linked on priv->flows. */
	TAILQ_FOREACH(flow, &priv->flows, next) {
		DEBUG("%p: flow %p still referenced", (void *)priv,
/**
 * Enable a control flow configured from the control plane.
 *
 * Builds an ETH (+ optional VLAN) pattern with a QUEUE action at the
 * lowest control-plane priority and installs it on the device's control
 * flow list (priv->ctrl_flows) via priv_flow_create().
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply, or NULL for no VLAN item.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 */
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		/* Lowest priority so user flows always take precedence. */
		.priority = MLX5_CTRL_FLOW_PRIORITY,
	struct rte_flow_item items[] = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		/* Second slot is VLAN only when a VLAN spec was given. */
		.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
				      RTE_FLOW_ITEM_TYPE_END,
		.type = RTE_FLOW_ITEM_TYPE_END,
	struct rte_flow_action actions[] = {
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &(struct rte_flow_action_queue){
		.type = RTE_FLOW_ACTION_TYPE_END,
	struct rte_flow *flow;
	struct rte_flow_error error;
	/* Control flows live on their own list, separate from user flows. */
	flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
/**
 * Enable a flow control configured from the control plane.
 *
 * Convenience wrapper around mlx5_ctrl_flow_vlan() for Ethernet-only
 * control flows (no VLAN spec/mask).
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 */
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);