4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
38 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
40 #pragma GCC diagnostic ignored "-Wpedantic"
42 #include <infiniband/verbs.h>
44 #pragma GCC diagnostic error "-Wpedantic"
47 #include <rte_ethdev.h>
49 #include <rte_flow_driver.h>
50 #include <rte_malloc.h>
55 /* Define minimal priority for control plane flows. */
56 #define MLX5_CTRL_FLOW_PRIORITY 4
/*
 * Forward declarations of the rte_flow item -> Verbs specification
 * conversion callbacks wired into mlx5_flow_items[].convert below.
 * NOTE(review): the storage-class line and the trailing "void *data);"
 * parameter of each prototype are absent from this excerpt.
 */
59 mlx5_flow_create_eth(const struct rte_flow_item *item,
60 const void *default_mask,
64 mlx5_flow_create_vlan(const struct rte_flow_item *item,
65 const void *default_mask,
69 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
70 const void *default_mask,
74 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
75 const void *default_mask,
79 mlx5_flow_create_udp(const struct rte_flow_item *item,
80 const void *default_mask,
84 mlx5_flow_create_tcp(const struct rte_flow_item *item,
85 const void *default_mask,
89 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
90 const void *default_mask,
/*
 * Core data structures for the flow engine.  NOTE(review): several struct
 * headers and closing braces are missing from this excerpt (e.g. the
 * "struct mlx5_flow {" and "struct rte_flow {" opener lines); the field
 * lists below are attributed to those structs by their doc comments.
 */
93 /** Structure for Drop queue. */
94 struct mlx5_hrxq_drop {
95 struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
96 struct ibv_qp *qp; /**< Verbs queue pair. */
97 struct ibv_wq *wq; /**< Verbs work queue. */
98 struct ibv_cq *cq; /**< Verbs completion queue. */
101 /* Flows structures. */
103 uint64_t hash_fields; /**< Fields that participate in the hash. */
104 struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
107 /* Drop flows structures. */
108 struct mlx5_flow_drop {
109 struct mlx5_hrxq_drop hrxq; /**< Drop hash Rx queue. */
/* Fields of the PMD-private struct rte_flow handle returned to callers. */
113 TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
114 uint32_t mark:1; /**< Set if the flow is marked. */
115 uint32_t drop:1; /**< Drop queue. */
116 struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
117 struct ibv_flow *ibv_flow; /**< Verbs flow. */
118 uint16_t queues_n; /**< Number of entries in queue[]. */
119 uint16_t (*queues)[]; /**< Queues indexes to use. */
121 struct mlx5_flow frxq; /**< Flow with Rx queue. */
122 struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
126 /** Static initializer for items. */
/*
 * NOTE(review): the "#define ITEMS(...)" opener line is missing here; the
 * visible body builds an anonymous, END-terminated array of item types.
 */
128 (const enum rte_flow_item_type []){ \
129 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
132 /** Structure to generate a simple graph of layers supported by the NIC. */
133 struct mlx5_flow_items {
134 /** List of possible actions for these items. */
135 const enum rte_flow_action_type *const actions;
136 /** Bit-masks corresponding to the possibilities for the item. */
139 * Default bit-masks to use when item->mask is not provided. When
140 * \default_mask is also NULL, the full supported bit-mask (\mask) is
143 const void *default_mask;
144 /** Bit-masks size in bytes. */
145 const unsigned int mask_sz;
147 * Conversion function from rte_flow to NIC specific flow.
150 * rte_flow item to convert.
151 * @param default_mask
152 * Default bit-masks to use when item->mask is not provided.
154 * Internal structure to store the conversion.
157 * 0 on success, negative value otherwise.
159 int (*convert)(const struct rte_flow_item *item,
160 const void *default_mask,
162 /** Size in bytes of the destination structure. */
163 const unsigned int dst_sz;
164 /** List of possible following items. */
165 const enum rte_flow_item_type *const items;
168 /** Valid action for this PMD. */
/* Shared by every entry in mlx5_flow_items[] below; END-terminated. */
169 static const enum rte_flow_action_type valid_actions[] = {
170 RTE_FLOW_ACTION_TYPE_DROP,
171 RTE_FLOW_ACTION_TYPE_QUEUE,
172 RTE_FLOW_ACTION_TYPE_MARK,
173 RTE_FLOW_ACTION_TYPE_FLAG,
174 RTE_FLOW_ACTION_TYPE_END,
177 /** Graph of supported items and associated actions. */
/*
 * Indexed by rte_flow item type.  ".items" lists the item types allowed to
 * follow this one (the graph edges walked by priv_flow_convert()); ".mask"
 * bounds the fields the PMD supports; ".dst_sz" sizes the Verbs spec the
 * ".convert" callback emits.  NOTE(review): several initializer lines
 * (closing braces, some mask fields) are missing from this excerpt.
 */
178 static const struct mlx5_flow_items mlx5_flow_items[] = {
179 [RTE_FLOW_ITEM_TYPE_END] = {
180 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
181 RTE_FLOW_ITEM_TYPE_VXLAN),
183 [RTE_FLOW_ITEM_TYPE_ETH] = {
184 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
185 RTE_FLOW_ITEM_TYPE_IPV4,
186 RTE_FLOW_ITEM_TYPE_IPV6),
187 .actions = valid_actions,
188 .mask = &(const struct rte_flow_item_eth){
189 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
190 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
193 .default_mask = &rte_flow_item_eth_mask,
194 .mask_sz = sizeof(struct rte_flow_item_eth),
195 .convert = mlx5_flow_create_eth,
196 .dst_sz = sizeof(struct ibv_flow_spec_eth),
/*
 * VLAN folds its TCI into the preceding Ethernet spec (see
 * mlx5_flow_create_vlan()), hence no visible dst_sz here.
 */
198 [RTE_FLOW_ITEM_TYPE_VLAN] = {
199 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
200 RTE_FLOW_ITEM_TYPE_IPV6),
201 .actions = valid_actions,
202 .mask = &(const struct rte_flow_item_vlan){
205 .default_mask = &rte_flow_item_vlan_mask,
206 .mask_sz = sizeof(struct rte_flow_item_vlan),
207 .convert = mlx5_flow_create_vlan,
210 [RTE_FLOW_ITEM_TYPE_IPV4] = {
211 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
212 RTE_FLOW_ITEM_TYPE_TCP),
213 .actions = valid_actions,
214 .mask = &(const struct rte_flow_item_ipv4){
218 .type_of_service = -1,
222 .default_mask = &rte_flow_item_ipv4_mask,
223 .mask_sz = sizeof(struct rte_flow_item_ipv4),
224 .convert = mlx5_flow_create_ipv4,
225 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
227 [RTE_FLOW_ITEM_TYPE_IPV6] = {
228 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
229 RTE_FLOW_ITEM_TYPE_TCP),
230 .actions = valid_actions,
231 .mask = &(const struct rte_flow_item_ipv6){
234 0xff, 0xff, 0xff, 0xff,
235 0xff, 0xff, 0xff, 0xff,
236 0xff, 0xff, 0xff, 0xff,
237 0xff, 0xff, 0xff, 0xff,
240 0xff, 0xff, 0xff, 0xff,
241 0xff, 0xff, 0xff, 0xff,
242 0xff, 0xff, 0xff, 0xff,
243 0xff, 0xff, 0xff, 0xff,
250 .default_mask = &rte_flow_item_ipv6_mask,
251 .mask_sz = sizeof(struct rte_flow_item_ipv6),
252 .convert = mlx5_flow_create_ipv6,
253 .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
255 [RTE_FLOW_ITEM_TYPE_UDP] = {
256 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
257 .actions = valid_actions,
258 .mask = &(const struct rte_flow_item_udp){
264 .default_mask = &rte_flow_item_udp_mask,
265 .mask_sz = sizeof(struct rte_flow_item_udp),
266 .convert = mlx5_flow_create_udp,
267 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
/* TCP is a terminal item: no ".items" successors are listed. */
269 [RTE_FLOW_ITEM_TYPE_TCP] = {
270 .actions = valid_actions,
271 .mask = &(const struct rte_flow_item_tcp){
277 .default_mask = &rte_flow_item_tcp_mask,
278 .mask_sz = sizeof(struct rte_flow_item_tcp),
279 .convert = mlx5_flow_create_tcp,
280 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
/* VXLAN may be followed by an inner Ethernet layer. */
282 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
283 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
284 .actions = valid_actions,
285 .mask = &(const struct rte_flow_item_vxlan){
286 .vni = "\xff\xff\xff",
288 .default_mask = &rte_flow_item_vxlan_mask,
289 .mask_sz = sizeof(struct rte_flow_item_vxlan),
290 .convert = mlx5_flow_create_vxlan,
291 .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
295 /* Structure to parse actions. */
/* Filled by priv_flow_convert() while walking the action list. */
296 struct mlx5_flow_action {
297 uint32_t queue:1; /**< Target is a receive queue. */
298 uint32_t drop:1; /**< Target is a drop queue. */
299 uint32_t mark:1; /**< Mark is present in the flow. */
300 uint32_t mark_id; /**< Mark identifier. */
301 uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
302 uint16_t queues_n; /**< Number of entries in queue[]. */
305 /** Structure to pass to the conversion function. */
/*
 * Parser state shared between the two priv_flow_convert() passes:
 * "offset" both sizes ibv_attr (first pass, ibv_attr == NULL) and tracks
 * the write cursor into it (second pass).
 */
306 struct mlx5_flow_parse {
307 struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
308 unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
309 uint32_t inner; /**< Set once VXLAN is encountered. */
310 uint32_t create:1; /**< Leave allocated resources on exit. */
311 uint64_t hash_fields; /**< Fields that participate in the hash. */
312 struct mlx5_flow_action actions; /**< Parsed action result. */
/* rte_flow driver callbacks exposed through mlx5_dev_filter_ctrl(). */
315 static const struct rte_flow_ops mlx5_flow_ops = {
316 .validate = mlx5_flow_validate,
317 .create = mlx5_flow_create,
318 .destroy = mlx5_flow_destroy,
319 .flush = mlx5_flow_flush,
321 .isolate = mlx5_flow_isolate,
325 * Manage filter operations.
328 * Pointer to Ethernet device structure.
332 * Operation to perform.
334 * Pointer to operation-specific structure.
337 * 0 on success, negative errno value on failure.
/*
 * Only the generic (rte_flow) filter type is supported: a GET hands the
 * mlx5_flow_ops table back through *arg; anything else logs an error.
 * NOTE(review): the return statements are missing from this excerpt.
 */
340 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
341 enum rte_filter_type filter_type,
342 enum rte_filter_op filter_op,
347 if (filter_type == RTE_ETH_FILTER_GENERIC) {
348 if (filter_op != RTE_ETH_FILTER_GET)
350 *(const void **)arg = &mlx5_flow_ops;
353 ERROR("%p: filter type (%d) not supported",
354 (void *)dev, filter_type);
359 * Check support for a given item.
362 * Item specification.
364 * Bit-masks covering supported fields to compare with spec, last and mask in
367 * Bit-Mask size in bytes.
/*
 * Every bit set in the user's spec/last/mask must also be set in the
 * PMD-supported "mask"; when both spec and last are given, the masked spec
 * is compared against the masked last with memcmp().  NOTE(review): the
 * early returns and local declarations are missing from this excerpt.
 */
373 mlx5_flow_item_validate(const struct rte_flow_item *item,
374 const uint8_t *mask, unsigned int size)
378 if (!item->spec && (item->mask || item->last))
380 if (item->spec && !item->mask) {
382 const uint8_t *spec = item->spec;
384 for (i = 0; i < size; ++i)
385 if ((spec[i] | mask[i]) != mask[i])
388 if (item->last && !item->mask) {
390 const uint8_t *spec = item->last;
392 for (i = 0; i < size; ++i)
393 if ((spec[i] | mask[i]) != mask[i])
/* item->mask itself must be a subset of the supported mask. */
398 const uint8_t *spec = item->mask;
400 for (i = 0; i < size; ++i)
401 if ((spec[i] | mask[i]) != mask[i])
404 if (item->spec && item->last) {
407 const uint8_t *apply = mask;
412 for (i = 0; i < size; ++i) {
413 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
414 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
416 ret = memcmp(spec, last, size);
422 * Validate and convert a flow supported by the NIC.
425 * Pointer to private structure.
427 * Flow rule attributes.
429 * Pattern specification (list terminated by the END pattern item).
431 * Associated actions (list terminated by the END action).
433 * Perform verbose error reporting if not NULL.
434 * @param[in, out] flow
435 * Flow structure to update.
438 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Called twice by priv_flow_validate(): first with flow->ibv_attr == NULL
 * to validate attributes/actions/items and accumulate the required buffer
 * size in flow->offset, then with an allocated ibv_attr to let each item's
 * convert() callback append its Verbs spec.  NOTE(review): many lines
 * (returns, loop headers, closing braces) are missing from this excerpt.
 */
441 priv_flow_convert(struct priv *priv,
442 const struct rte_flow_attr *attr,
443 const struct rte_flow_item items[],
444 const struct rte_flow_action actions[],
445 struct rte_flow_error *error,
446 struct mlx5_flow_parse *flow)
448 const struct mlx5_flow_items *cur_item = mlx5_flow_items;
/* Reset parser state but keep the caller-owned ibv_attr/create fields. */
451 *flow = (struct mlx5_flow_parse){
452 .ibv_attr = flow->ibv_attr,
453 .create = flow->create,
454 .offset = sizeof(struct ibv_flow_attr),
456 .mark_id = MLX5_FLOW_MARK_DEFAULT,
/* Attribute checks: no groups, fixed priority, ingress only. */
460 rte_flow_error_set(error, ENOTSUP,
461 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
463 "groups are not supported");
466 if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
467 rte_flow_error_set(error, ENOTSUP,
468 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
470 "priorities are not supported");
474 rte_flow_error_set(error, ENOTSUP,
475 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
477 "egress is not supported");
480 if (!attr->ingress) {
481 rte_flow_error_set(error, ENOTSUP,
482 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
484 "only ingress is supported");
/* Walk the action list, recording the result in flow->actions. */
487 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
488 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
490 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
491 flow->actions.drop = 1;
492 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
493 const struct rte_flow_action_queue *queue =
494 (const struct rte_flow_action_queue *)
499 if (!queue || (queue->index > (priv->rxqs_n - 1)))
500 goto exit_action_not_supported;
/* A QUEUE action must target a queue already in the RSS set, if any. */
501 for (n = 0; n < flow->actions.queues_n; ++n) {
502 if (flow->actions.queues[n] == queue->index) {
507 if (flow->actions.queues_n > 1 && !found) {
508 rte_flow_error_set(error, ENOTSUP,
509 RTE_FLOW_ERROR_TYPE_ACTION,
511 "queue action not in RSS queues");
515 flow->actions.queue = 1;
516 flow->actions.queues_n = 1;
517 flow->actions.queues[0] = queue->index;
519 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
520 const struct rte_flow_action_rss *rss =
521 (const struct rte_flow_action_rss *)
525 if (!rss || !rss->num) {
526 rte_flow_error_set(error, EINVAL,
527 RTE_FLOW_ERROR_TYPE_ACTION,
/* A prior single-queue action must be contained in the RSS set. */
532 if (flow->actions.queues_n == 1) {
535 assert(flow->actions.queues_n);
536 for (n = 0; n < rss->num; ++n) {
537 if (flow->actions.queues[0] ==
544 rte_flow_error_set(error, ENOTSUP,
545 RTE_FLOW_ERROR_TYPE_ACTION,
547 "queue action not in RSS"
/* Bound-check every RSS queue index against priv->rxqs_n. */
552 for (n = 0; n < rss->num; ++n) {
553 if (rss->queue[n] >= priv->rxqs_n) {
554 rte_flow_error_set(error, EINVAL,
555 RTE_FLOW_ERROR_TYPE_ACTION,
557 "queue id > number of"
562 flow->actions.queue = 1;
563 for (n = 0; n < rss->num; ++n)
564 flow->actions.queues[n] = rss->queue[n];
565 flow->actions.queues_n = rss->num;
566 } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
567 const struct rte_flow_action_mark *mark =
568 (const struct rte_flow_action_mark *)
572 rte_flow_error_set(error, EINVAL,
573 RTE_FLOW_ERROR_TYPE_ACTION,
575 "mark must be defined");
577 } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
578 rte_flow_error_set(error, ENOTSUP,
579 RTE_FLOW_ERROR_TYPE_ACTION,
581 "mark must be between 0"
585 flow->actions.mark = 1;
586 flow->actions.mark_id = mark->id;
587 } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
588 flow->actions.mark = 1;
590 goto exit_action_not_supported;
/*
 * First pass only (ibv_attr == NULL): reserve room for the tag/drop
 * action specs in the buffer to be allocated by the caller.
 */
593 if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
594 flow->offset += sizeof(struct ibv_flow_spec_action_tag);
595 if (!flow->ibv_attr && flow->actions.drop)
596 flow->offset += sizeof(struct ibv_flow_spec_action_drop);
597 if (!flow->actions.queue && !flow->actions.drop) {
598 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
599 NULL, "no valid action");
/* Walk the pattern, following the mlx5_flow_items[] successor graph. */
602 for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
603 const struct mlx5_flow_items *token = NULL;
607 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
611 cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
613 if (cur_item->items[i] == items->type) {
614 token = &mlx5_flow_items[items->type];
619 goto exit_item_not_supported;
621 err = mlx5_flow_item_validate(items,
622 (const uint8_t *)cur_item->mask,
625 goto exit_item_not_supported;
/* Second pass: emit the Verbs spec for this item. */
626 if (flow->ibv_attr && cur_item->convert) {
627 err = cur_item->convert(items,
628 (cur_item->default_mask ?
629 cur_item->default_mask :
633 goto exit_item_not_supported;
634 } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
636 rte_flow_error_set(error, ENOTSUP,
637 RTE_FLOW_ERROR_TYPE_ITEM,
639 "cannot recognize multiple"
640 " VXLAN encapsulations");
645 flow->offset += cur_item->dst_sz;
648 exit_item_not_supported:
649 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
650 items, "item not supported");
652 exit_action_not_supported:
653 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
654 actions, "action not supported");
659 * Convert Ethernet item to Verbs specification.
662 * Item specification.
663 * @param default_mask[in]
664 * Default bit-masks to use when item->mask is not provided.
665 * @param data[in, out]
/*
 * Appends an ibv_flow_spec_eth at flow->offset inside flow->ibv_attr and
 * resets hash_fields (no RSS fields for a plain L2 match).  NOTE(review):
 * the NULL spec/mask guards and the trailing return are missing from this
 * excerpt; presumably default_mask substitutes a missing item->mask.
 */
669 mlx5_flow_create_eth(const struct rte_flow_item *item,
670 const void *default_mask,
673 const struct rte_flow_item_eth *spec = item->spec;
674 const struct rte_flow_item_eth *mask = item->mask;
675 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
676 struct ibv_flow_spec_eth *eth;
677 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
680 ++flow->ibv_attr->num_of_specs;
681 flow->ibv_attr->priority = 2;
682 flow->hash_fields = 0;
683 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
684 *eth = (struct ibv_flow_spec_eth) {
685 .type = flow->inner | IBV_FLOW_SPEC_ETH,
692 memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
693 memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
694 eth->val.ether_type = spec->type;
695 memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
696 memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
697 eth->mask.ether_type = mask->type;
698 /* Remove unwanted bits from values. */
699 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
700 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
701 eth->val.src_mac[i] &= eth->mask.src_mac[i];
703 eth->val.ether_type &= eth->mask.ether_type;
708 * Convert VLAN item to Verbs specification.
711 * Item specification.
712 * @param default_mask[in]
713 * Default bit-masks to use when item->mask is not provided.
714 * @param data[in, out]
/*
 * VLAN emits no spec of its own: the TCI is patched into the
 * ibv_flow_spec_eth that was just written below flow->offset (hence the
 * "- eth_size" rewind).  NOTE(review): spec/mask guards are missing here.
 */
718 mlx5_flow_create_vlan(const struct rte_flow_item *item,
719 const void *default_mask,
722 const struct rte_flow_item_vlan *spec = item->spec;
723 const struct rte_flow_item_vlan *mask = item->mask;
724 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
725 struct ibv_flow_spec_eth *eth;
726 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
728 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
733 eth->val.vlan_tag = spec->tci;
734 eth->mask.vlan_tag = mask->tci;
735 eth->val.vlan_tag &= eth->mask.vlan_tag;
740 * Convert IPv4 item to Verbs specification.
743 * Item specification.
744 * @param default_mask[in]
745 * Default bit-masks to use when item->mask is not provided.
746 * @param data[in, out]
/*
 * Appends an ibv_flow_spec_ipv4_ext spec and *sets* (not ORs) hash_fields
 * to the IPv4 src/dst pair, replacing any L2 value.  Priority 1 ranks L3
 * matches above L2 (2) and below L4 (0).  NOTE(review): spec/mask guards
 * and the trailing return are missing from this excerpt.
 */
750 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
751 const void *default_mask,
754 const struct rte_flow_item_ipv4 *spec = item->spec;
755 const struct rte_flow_item_ipv4 *mask = item->mask;
756 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
757 struct ibv_flow_spec_ipv4_ext *ipv4;
758 unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
760 ++flow->ibv_attr->num_of_specs;
761 flow->ibv_attr->priority = 1;
762 flow->hash_fields = (IBV_RX_HASH_SRC_IPV4 |
763 IBV_RX_HASH_DST_IPV4);
764 ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
765 *ipv4 = (struct ibv_flow_spec_ipv4_ext) {
766 .type = flow->inner | IBV_FLOW_SPEC_IPV4_EXT,
773 ipv4->val = (struct ibv_flow_ipv4_ext_filter){
774 .src_ip = spec->hdr.src_addr,
775 .dst_ip = spec->hdr.dst_addr,
776 .proto = spec->hdr.next_proto_id,
777 .tos = spec->hdr.type_of_service,
779 ipv4->mask = (struct ibv_flow_ipv4_ext_filter){
780 .src_ip = mask->hdr.src_addr,
781 .dst_ip = mask->hdr.dst_addr,
782 .proto = mask->hdr.next_proto_id,
783 .tos = mask->hdr.type_of_service,
785 /* Remove unwanted bits from values. */
786 ipv4->val.src_ip &= ipv4->mask.src_ip;
787 ipv4->val.dst_ip &= ipv4->mask.dst_ip;
788 ipv4->val.proto &= ipv4->mask.proto;
789 ipv4->val.tos &= ipv4->mask.tos;
794 * Convert IPv6 item to Verbs specification.
797 * Item specification.
798 * @param default_mask[in]
799 * Default bit-masks to use when item->mask is not provided.
800 * @param data[in, out]
/*
 * IPv6 counterpart of mlx5_flow_create_ipv4(): appends an
 * ibv_flow_spec_ipv6 and sets hash_fields to the IPv6 src/dst pair.
 * NOTE(review): spec/mask guards, the val.* flow_label/next_hdr/hop_limit
 * copies and the trailing return are missing from this excerpt.
 */
804 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
805 const void *default_mask,
808 const struct rte_flow_item_ipv6 *spec = item->spec;
809 const struct rte_flow_item_ipv6 *mask = item->mask;
810 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
811 struct ibv_flow_spec_ipv6 *ipv6;
812 unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
815 ++flow->ibv_attr->num_of_specs;
816 flow->ibv_attr->priority = 1;
817 flow->hash_fields = (IBV_RX_HASH_SRC_IPV6 |
818 IBV_RX_HASH_DST_IPV6);
819 ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
820 *ipv6 = (struct ibv_flow_spec_ipv6) {
821 .type = flow->inner | IBV_FLOW_SPEC_IPV6,
828 memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
829 RTE_DIM(ipv6->val.src_ip));
830 memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
831 RTE_DIM(ipv6->val.dst_ip));
832 memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
833 RTE_DIM(ipv6->mask.src_ip));
834 memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
835 RTE_DIM(ipv6->mask.dst_ip));
836 ipv6->mask.flow_label = mask->hdr.vtc_flow;
837 ipv6->mask.next_hdr = mask->hdr.proto;
838 ipv6->mask.hop_limit = mask->hdr.hop_limits;
839 /* Remove unwanted bits from values. */
840 for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
841 ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
842 ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
844 ipv6->val.flow_label &= ipv6->mask.flow_label;
845 ipv6->val.next_hdr &= ipv6->mask.next_hdr;
846 ipv6->val.hop_limit &= ipv6->mask.hop_limit;
851 * Convert UDP item to Verbs specification.
854 * Item specification.
855 * @param default_mask[in]
856 * Default bit-masks to use when item->mask is not provided.
857 * @param data[in, out]
/*
 * Appends an ibv_flow_spec_tcp_udp (type UDP) and ORs the UDP port hash
 * bits into hash_fields, preserving the L3 bits set earlier.  Priority 0
 * ranks L4 matches highest.  NOTE(review): spec/mask guards and the
 * trailing return are missing from this excerpt.
 */
861 mlx5_flow_create_udp(const struct rte_flow_item *item,
862 const void *default_mask,
865 const struct rte_flow_item_udp *spec = item->spec;
866 const struct rte_flow_item_udp *mask = item->mask;
867 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
868 struct ibv_flow_spec_tcp_udp *udp;
869 unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
871 ++flow->ibv_attr->num_of_specs;
872 flow->ibv_attr->priority = 0;
873 flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |
874 IBV_RX_HASH_DST_PORT_UDP);
875 udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
876 *udp = (struct ibv_flow_spec_tcp_udp) {
877 .type = flow->inner | IBV_FLOW_SPEC_UDP,
884 udp->val.dst_port = spec->hdr.dst_port;
885 udp->val.src_port = spec->hdr.src_port;
886 udp->mask.dst_port = mask->hdr.dst_port;
887 udp->mask.src_port = mask->hdr.src_port;
888 /* Remove unwanted bits from values. */
889 udp->val.src_port &= udp->mask.src_port;
890 udp->val.dst_port &= udp->mask.dst_port;
895 * Convert TCP item to Verbs specification.
898 * Item specification.
899 * @param default_mask[in]
900 * Default bit-masks to use when item->mask is not provided.
901 * @param data[in, out]
/*
 * Mirror of mlx5_flow_create_udp() for TCP: same spec struct, TCP type
 * and TCP port hash bits ORed into hash_fields.  NOTE(review): spec/mask
 * guards and the trailing return are missing from this excerpt.
 */
905 mlx5_flow_create_tcp(const struct rte_flow_item *item,
906 const void *default_mask,
909 const struct rte_flow_item_tcp *spec = item->spec;
910 const struct rte_flow_item_tcp *mask = item->mask;
911 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
912 struct ibv_flow_spec_tcp_udp *tcp;
913 unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
915 ++flow->ibv_attr->num_of_specs;
916 flow->ibv_attr->priority = 0;
917 flow->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |
918 IBV_RX_HASH_DST_PORT_TCP);
919 tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
920 *tcp = (struct ibv_flow_spec_tcp_udp) {
921 .type = flow->inner | IBV_FLOW_SPEC_TCP,
928 tcp->val.dst_port = spec->hdr.dst_port;
929 tcp->val.src_port = spec->hdr.src_port;
930 tcp->mask.dst_port = mask->hdr.dst_port;
931 tcp->mask.src_port = mask->hdr.src_port;
932 /* Remove unwanted bits from values. */
933 tcp->val.src_port &= tcp->mask.src_port;
934 tcp->val.dst_port &= tcp->mask.dst_port;
939 * Convert VXLAN item to Verbs specification.
942 * Item specification.
943 * @param default_mask[in]
944 * Default bit-masks to use when item->mask is not provided.
945 * @param data[in, out]
/*
 * Appends an ibv_flow_spec_tunnel and flips flow->inner so that every
 * subsequent item is tagged IBV_FLOW_SPEC_INNER.  The 24-bit VNI is copied
 * into bytes 1..3 of a union ("id", declared on a line missing from this
 * excerpt) so it lands in the tunnel_id field's expected position.
 */
949 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
950 const void *default_mask,
953 const struct rte_flow_item_vxlan *spec = item->spec;
954 const struct rte_flow_item_vxlan *mask = item->mask;
955 struct mlx5_flow_parse *flow = (struct mlx5_flow_parse *)data;
956 struct ibv_flow_spec_tunnel *vxlan;
957 unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
963 ++flow->ibv_attr->num_of_specs;
964 flow->ibv_attr->priority = 0;
966 vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
967 *vxlan = (struct ibv_flow_spec_tunnel) {
968 .type = flow->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
971 flow->inner = IBV_FLOW_SPEC_INNER;
976 memcpy(&id.vni[1], spec->vni, 3);
977 vxlan->val.tunnel_id = id.vlan_id;
978 memcpy(&id.vni[1], mask->vni, 3);
979 vxlan->mask.tunnel_id = id.vlan_id;
980 /* Remove unwanted bits from values. */
981 vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
986 * Convert mark/flag action to Verbs specification.
989 * Pointer to MLX5 flow structure.
/*
 * Appends an ibv_flow_spec_action_tag carrying the encoded mark id
 * (mlx5_flow_mark_set()) and advances flow->offset past it.  Only called
 * when the MARK/FLAG action was parsed (see the assert).
 */
994 mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
996 struct ibv_flow_spec_action_tag *tag;
997 unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
999 assert(flow->actions.mark);
1000 tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
1001 *tag = (struct ibv_flow_spec_action_tag){
1002 .type = IBV_FLOW_SPEC_ACTION_TAG,
1004 .tag_id = mlx5_flow_mark_set(mark_id),
1006 ++flow->ibv_attr->num_of_specs;
1007 flow->offset += size;
1012 * Complete flow rule creation with a drop queue.
1015 * Pointer to private structure.
1017 * MLX5 flow attributes (filled by mlx5_flow_validate()).
1019 * Perform verbose error reporting if not NULL.
1022 * A flow if the rule could be created.
/*
 * Allocates the rte_flow handle, appends an ibv_flow_spec_action_drop to
 * the already-converted ibv_attr and, when the device is started, creates
 * the Verbs flow on the shared drop queue QP.  NOTE(review): the success
 * return and the error-unwind tail are missing from this excerpt.
 */
1024 static struct rte_flow *
1025 priv_flow_create_action_queue_drop(struct priv *priv,
1026 struct mlx5_flow_parse *flow,
1027 struct rte_flow_error *error)
1029 struct rte_flow *rte_flow;
1030 struct ibv_flow_spec_action_drop *drop;
1031 unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1035 rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
1037 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1038 NULL, "cannot allocate flow memory");
1042 drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
1043 *drop = (struct ibv_flow_spec_action_drop){
1044 .type = IBV_FLOW_SPEC_ACTION_DROP,
1047 ++flow->ibv_attr->num_of_specs;
1048 flow->offset += sizeof(struct ibv_flow_spec_action_drop);
/* Ownership of ibv_attr moves into the returned handle. */
1049 rte_flow->ibv_attr = flow->ibv_attr;
1050 if (!priv->dev->data->dev_started)
1052 rte_flow->drxq.hrxq.qp = priv->flow_drop_queue->qp;
1053 rte_flow->ibv_flow = ibv_create_flow(rte_flow->drxq.hrxq.qp,
1054 rte_flow->ibv_attr);
1055 if (!rte_flow->ibv_flow) {
1056 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1057 NULL, "flow rule creation failure");
1068 * Complete flow rule creation.
1071 * Pointer to private structure.
1073 * MLX5 flow attributes (filled by mlx5_flow_validate()).
1075 * Perform verbose error reporting if not NULL.
1078 * A flow if the rule could be created.
/*
 * Queue/RSS counterpart of the drop path: allocates the rte_flow with its
 * queue array stored inline after the struct, acquires (or creates) the
 * hash Rx queue for the parsed hash_fields/queues, propagates the mark
 * flag to each Rx queue, then creates the Verbs flow if the device is
 * started.  The visible error tail releases the hrxq reference.
 * NOTE(review): the success return and part of the unwind are missing.
 */
1080 static struct rte_flow *
1081 priv_flow_create_action_queue(struct priv *priv,
1082 struct mlx5_flow_parse *flow,
1083 struct rte_flow_error *error)
1085 struct rte_flow *rte_flow;
1090 assert(!flow->actions.drop);
1092 rte_calloc(__func__, 1,
1094 flow->actions.queues_n * sizeof(uint16_t),
1097 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1098 NULL, "cannot allocate flow memory");
1101 rte_flow->mark = flow->actions.mark;
1102 rte_flow->ibv_attr = flow->ibv_attr;
/* The queue array lives in the same allocation, right after the struct. */
1103 rte_flow->queues = (uint16_t (*)[])(rte_flow + 1);
1104 memcpy(rte_flow->queues, flow->actions.queues,
1105 flow->actions.queues_n * sizeof(uint16_t));
1106 rte_flow->queues_n = flow->actions.queues_n;
1107 rte_flow->frxq.hash_fields = flow->hash_fields;
1108 rte_flow->frxq.hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
1109 rss_hash_default_key_len,
1111 (*rte_flow->queues),
1112 rte_flow->queues_n);
1113 if (!rte_flow->frxq.hrxq) {
1114 rte_flow->frxq.hrxq =
1115 mlx5_priv_hrxq_new(priv, rss_hash_default_key,
1116 rss_hash_default_key_len,
1118 (*rte_flow->queues),
1119 rte_flow->queues_n);
1120 if (!rte_flow->frxq.hrxq) {
1121 rte_flow_error_set(error, ENOMEM,
1122 RTE_FLOW_ERROR_TYPE_HANDLE,
1123 NULL, "cannot create hash rxq");
/* Mark flag is sticky on the Rx queues this flow targets. */
1127 for (i = 0; i != flow->actions.queues_n; ++i) {
1128 struct mlx5_rxq_data *q =
1129 (*priv->rxqs)[flow->actions.queues[i]];
1131 q->mark |= flow->actions.mark;
1133 if (!priv->dev->data->dev_started)
1135 rte_flow->ibv_flow = ibv_create_flow(rte_flow->frxq.hrxq->qp,
1136 rte_flow->ibv_attr);
1137 if (!rte_flow->ibv_flow) {
1138 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1139 NULL, "flow rule creation failure");
1145 if (rte_flow->frxq.hrxq)
1146 mlx5_priv_hrxq_release(priv, rte_flow->frxq.hrxq);
1155 * Pointer to private structure.
1157 * Flow rule attributes.
1158 * @param[in] pattern
1159 * Pattern specification (list terminated by the END pattern item).
1160 * @param[in] actions
1161 * Associated actions (list terminated by the END action).
1163 * Perform verbose error reporting if not NULL.
1164 * @param[in,out] parser
1165 * MLX5 parser structure.
1168 * 0 on success, negative errno value on failure.
/*
 * Drives the two-pass conversion: pass 1 (parser->ibv_attr == NULL)
 * validates and sizes; the buffer is then allocated and initialized and
 * pass 2 fills it, appending the mark tag spec last.  Unless
 * parser->create is set the ibv_attr is freed on exit (validation-only
 * path).  NOTE(review): return statements and part of the initializer are
 * missing from this excerpt.
 */
1171 priv_flow_validate(struct priv *priv,
1172 const struct rte_flow_attr *attr,
1173 const struct rte_flow_item items[],
1174 const struct rte_flow_action actions[],
1175 struct rte_flow_error *error,
1176 struct mlx5_flow_parse *parser)
1180 err = priv_flow_convert(priv, attr, items, actions, error, parser);
/* Reserve room for the tag spec appended after item conversion. */
1183 if (parser->actions.mark)
1184 parser->offset += sizeof(struct ibv_flow_spec_action_tag);
1185 parser->ibv_attr = rte_malloc(__func__, parser->offset, 0);
1186 if (!parser->ibv_attr) {
1187 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1188 NULL, "cannot allocate ibv_attr memory");
1192 *parser->ibv_attr = (struct ibv_flow_attr){
1193 .type = IBV_FLOW_ATTR_NORMAL,
1194 .size = sizeof(struct ibv_flow_attr),
1195 .priority = attr->priority,
1200 err = priv_flow_convert(priv, attr, items, actions, error, parser);
1201 if (err || parser->create)
1203 if (parser->actions.mark)
1204 mlx5_flow_create_flag_mark(parser, parser->actions.mark_id);
1207 if (parser->ibv_attr)
1208 rte_free(parser->ibv_attr);
1216 * Pointer to private structure.
1218 * Pointer to a TAILQ flow list.
1220 * Flow rule attributes.
1221 * @param[in] pattern
1222 * Pattern specification (list terminated by the END pattern item).
1223 * @param[in] actions
1224 * Associated actions (list terminated by the END action).
1226 * Perform verbose error reporting if not NULL.
1229 * A flow on success, NULL otherwise.
/*
 * Validate-then-build (parser.create = 1 keeps ibv_attr alive), dispatch
 * to the drop or queue completion path, and link the new flow onto the
 * caller's list.  The visible error tail frees the orphaned ibv_attr.
 */
1231 static struct rte_flow *
1232 priv_flow_create(struct priv *priv,
1233 struct mlx5_flows *list,
1234 const struct rte_flow_attr *attr,
1235 const struct rte_flow_item items[],
1236 const struct rte_flow_action actions[],
1237 struct rte_flow_error *error)
1239 struct mlx5_flow_parse parser = { .create = 1, };
1240 struct rte_flow *flow;
1243 err = priv_flow_validate(priv, attr, items, actions, error, &parser);
1246 if (parser.actions.drop)
1247 flow = priv_flow_create_action_queue_drop(priv, &parser, error);
1249 flow = priv_flow_create_action_queue(priv, &parser, error);
1252 TAILQ_INSERT_TAIL(list, flow, next);
1253 DEBUG("Flow created %p", (void *)flow);
1256 if (parser.ibv_attr)
1257 rte_free(parser.ibv_attr);
1262 * Validate a flow supported by the NIC.
1264 * @see rte_flow_validate()
/* rte_flow_ops.validate entry point: validation only (parser.create = 0). */
1268 mlx5_flow_validate(struct rte_eth_dev *dev,
1269 const struct rte_flow_attr *attr,
1270 const struct rte_flow_item items[],
1271 const struct rte_flow_action actions[],
1272 struct rte_flow_error *error)
1274 struct priv *priv = dev->data->dev_private;
1276 struct mlx5_flow_parse parser = { .create = 0, };
1279 ret = priv_flow_validate(priv, attr, items, actions, error, &parser);
1287 * @see rte_flow_create()
/* rte_flow_ops.create entry point; flows are tracked on priv->flows. */
1291 mlx5_flow_create(struct rte_eth_dev *dev,
1292 const struct rte_flow_attr *attr,
1293 const struct rte_flow_item items[],
1294 const struct rte_flow_action actions[],
1295 struct rte_flow_error *error)
1297 struct priv *priv = dev->data->dev_private;
1298 struct rte_flow *flow;
1301 flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
1311 * Pointer to private structure.
1313 * Pointer to a TAILQ flow list.
/*
 * Destroy one flow rule: drop the mark flag from its RX queues when no
 * other listed flow still marks them, destroy the verbs flow, release the
 * hash RX queue reference, unlink the rule from @list and free its
 * attribute buffer.
 *
 * NOTE(review): several lines (variable declarations, inner-loop bounds
 * and the mark bookkeeping between 1348 and 1352) are not visible in this
 * excerpt — verify the exact mark logic against the full source.
 */
1318 priv_flow_destroy(struct priv *priv,
1319 struct mlx5_flows *list,
1320 struct rte_flow *flow)
/* Mark cleanup is only relevant for non-drop flows carrying a mark. */
1326 if (flow->drop || !flow->mark)
1328 queues = flow->frxq.hrxq->ind_table->queues;
1329 queues_n = flow->frxq.hrxq->ind_table->queues_n;
1330 for (i = 0; i != queues_n; ++i) {
1331 struct rte_flow *tmp;
1332 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[i]];
/*
 * To remove the mark from the queue, the queue must not be
 * present in any other marked flow (RSS or not).
 */
1339 TAILQ_FOREACH(tmp, list, next) {
1345 (j != tmp->frxq.hrxq->ind_table->queues_n) &&
1348 if (tmp->frxq.hrxq->ind_table->queues[j] ==
1352 rxq_data->mark = mark;
/* Tear down the verbs rule before releasing the hash RX queue. */
1356 claim_zero(ibv_destroy_flow(flow->ibv_flow));
1358 mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
1359 TAILQ_REMOVE(list, flow, next);
1360 rte_free(flow->ibv_attr);
1361 DEBUG("Flow destroyed %p", (void *)flow);
1368 * @see rte_flow_destroy()
/*
 * DPDK callback implementing rte_flow_ops::destroy.  Delegates to
 * priv_flow_destroy() on the device's regular flow list.
 */
1372 mlx5_flow_destroy(struct rte_eth_dev *dev,
1373 struct rte_flow *flow,
1374 struct rte_flow_error *error)
1376 struct priv *priv = dev->data->dev_private;
1380 priv_flow_destroy(priv, &priv->flows, flow);
1386 * Destroy all flows.
1389 * Pointer to private structure.
1391 * Pointer to a TAILQ flow list.
/*
 * Destroy every flow on @list by repeatedly removing the head until the
 * list is empty (priv_flow_destroy() unlinks each entry).
 */
1394 priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
1396 while (!TAILQ_EMPTY(list)) {
1397 struct rte_flow *flow;
1399 flow = TAILQ_FIRST(list);
1400 priv_flow_destroy(priv, list, flow);
1405 * Destroy all flows.
1407 * @see rte_flow_flush()
/*
 * DPDK callback implementing rte_flow_ops::flush.  Flushes the device's
 * regular flow list.
 */
1411 mlx5_flow_flush(struct rte_eth_dev *dev,
1412 struct rte_flow_error *error)
1414 struct priv *priv = dev->data->dev_private;
1418 priv_flow_flush(priv, &priv->flows);
1424 * Create drop queue.
1427 * Pointer to private structure.
/*
 * Allocate the verbs objects backing the drop queue: a CQ, a single RQ-type
 * WQ, a one-entry indirection table and a RAW_PACKET hash QP whose hash
 * fields mask is zero (all traffic maps to the lone WQ).  On success the
 * result is stored in priv->flow_drop_queue.
 *
 * NOTE(review): the error-branch gotos between each step are not visible
 * in this excerpt; the unwind sequence at the end (1496-1505) destroys
 * resources in reverse order of acquisition.
 */
1433 priv_flow_create_drop_queue(struct priv *priv)
1435 struct mlx5_hrxq_drop *fdq = NULL;
1439 fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
1441 WARN("cannot allocate memory for drop queue");
/* Minimal (1-entry) completion queue, never polled for real traffic. */
1444 fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
1446 WARN("cannot allocate CQ for drop queue");
1449 fdq->wq = ibv_create_wq(priv->ctx,
1450 &(struct ibv_wq_init_attr){
1451 .wq_type = IBV_WQT_RQ,
1458 WARN("cannot allocate WQ for drop queue");
/* log_ind_tbl_size = 0: indirection table with a single entry. */
1461 fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
1462 &(struct ibv_rwq_ind_table_init_attr){
1463 .log_ind_tbl_size = 0,
1464 .ind_tbl = &fdq->wq,
1467 if (!fdq->ind_table) {
1468 WARN("cannot allocate indirection table for drop queue");
1471 fdq->qp = ibv_create_qp_ex(priv->ctx,
1472 &(struct ibv_qp_init_attr_ex){
1473 .qp_type = IBV_QPT_RAW_PACKET,
1475 IBV_QP_INIT_ATTR_PD |
1476 IBV_QP_INIT_ATTR_IND_TABLE |
1477 IBV_QP_INIT_ATTR_RX_HASH,
1478 .rx_hash_conf = (struct ibv_rx_hash_conf){
1480 IBV_RX_HASH_FUNC_TOEPLITZ,
1481 .rx_hash_key_len = rss_hash_default_key_len,
1482 .rx_hash_key = rss_hash_default_key,
/* Zero mask: no fields hashed, everything lands on the single WQ. */
1483 .rx_hash_fields_mask = 0,
1485 .rwq_ind_tbl = fdq->ind_table,
1489 WARN("cannot allocate QP for drop queue");
1492 priv->flow_drop_queue = fdq;
/* Error unwind: destroy in reverse order of creation. */
1496 claim_zero(ibv_destroy_qp(fdq->qp));
1498 claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
1500 claim_zero(ibv_destroy_wq(fdq->wq));
1502 claim_zero(ibv_destroy_cq(fdq->cq));
1505 priv->flow_drop_queue = NULL;
1510 * Delete drop queue.
1513 * Pointer to private structure.
/*
 * Release the drop queue's verbs objects in reverse order of creation
 * (QP, indirection table, WQ, CQ) and clear priv->flow_drop_queue.
 * NOTE(review): per-member NULL checks between the destroy calls are not
 * visible in this excerpt.
 */
1516 priv_flow_delete_drop_queue(struct priv *priv)
1518 struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
1523 claim_zero(ibv_destroy_qp(fdq->qp));
1525 claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
1527 claim_zero(ibv_destroy_wq(fdq->wq));
1529 claim_zero(ibv_destroy_cq(fdq->cq));
1531 priv->flow_drop_queue = NULL;
1538 * Pointer to private structure.
1540 * Pointer to a TAILQ flow list.
/*
 * Remove all flows on @list from hardware without forgetting them: the
 * verbs flow is destroyed and the hash RX queue released, but each
 * rte_flow stays on the list so priv_flow_start() can re-apply it.
 * Iterates in reverse so rules are removed in the opposite order of
 * their insertion.
 *
 * NOTE(review): lines between 1551 and 1554 are missing from this
 * excerpt; as shown, flow->frxq.hrxq is dereferenced (1555) after being
 * cleared (1551), which cannot be right in isolation — confirm the
 * mark-clearing block's placement/guard against the full source.
 */
1543 priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
1545 struct rte_flow *flow;
1547 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
1548 claim_zero(ibv_destroy_flow(flow->ibv_flow));
1549 flow->ibv_flow = NULL;
1550 mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
1551 flow->frxq.hrxq = NULL;
1554 struct mlx5_ind_table_ibv *ind_tbl =
1555 flow->frxq.hrxq->ind_table;
/* Clear the mark flag on every RX queue fed by this flow. */
1557 for (n = 0; n < ind_tbl->queues_n; ++n)
1558 (*priv->rxqs)[ind_tbl->queues[n]]->mark = 0;
1560 DEBUG("Flow %p removed", (void *)flow);
1568 * Pointer to private structure.
1570 * Pointer to a TAILQ flow list.
1573 * 0 on success, a errno value otherwise and rte_errno is set.
/*
 * Re-apply every flow on @list to hardware (counterpart of
 * priv_flow_stop()).  For each flow: reuse an existing hash RX queue if
 * one matches, otherwise create one, then re-create the verbs flow and
 * restore the mark flag on the affected RX queues.
 *
 * NOTE(review): error-return lines between the visible statements are
 * missing from this excerpt.
 */
1576 priv_flow_start(struct priv *priv, struct mlx5_flows *list)
1578 struct rte_flow *flow;
1580 TAILQ_FOREACH(flow, list, next) {
/* Already applied (hrxq still held) — nothing to do for this flow. */
1581 if (flow->frxq.hrxq)
/* First try to reacquire a matching existing hash RX queue... */
1584 mlx5_priv_hrxq_get(priv, rss_hash_default_key,
1585 rss_hash_default_key_len,
1586 flow->frxq.hash_fields,
1589 if (flow->frxq.hrxq)
/* ...otherwise create a fresh one. */
1592 mlx5_priv_hrxq_new(priv, rss_hash_default_key,
1593 rss_hash_default_key_len,
1594 flow->frxq.hash_fields,
1597 if (!flow->frxq.hrxq) {
1598 DEBUG("Flow %p cannot be applied",
1604 flow->ibv_flow = ibv_create_flow(flow->frxq.hrxq->qp,
1606 if (!flow->ibv_flow) {
1607 DEBUG("Flow %p cannot be applied", (void *)flow);
1611 DEBUG("Flow %p applied", (void *)flow);
/* Restore the mark flag on every RX queue this flow feeds. */
1616 n < flow->frxq.hrxq->ind_table->queues_n;
1619 flow->frxq.hrxq->ind_table->queues[n];
1620 (*priv->rxqs)[idx]->mark = 1;
1630 * @see rte_flow_isolate()
/*
 * DPDK callback implementing rte_flow_ops::isolate.  Isolated mode can
 * only be toggled while the port is stopped; otherwise EBUSY is reported
 * through @error.
 */
1634 mlx5_flow_isolate(struct rte_eth_dev *dev,
1636 struct rte_flow_error *error)
1638 struct priv *priv = dev->data->dev_private;
1641 if (dev->data->dev_started) {
1642 rte_flow_error_set(error, EBUSY,
1643 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1645 "port must be stopped first");
/* Normalize any non-zero value to 1. */
1649 priv->isolated = !!enable;
1655 * Verify the flow list is empty
1658 * Pointer to private structure.
1660 * @return the number of flows not released.
/*
 * Leak check: walk priv->flows and log every rule still registered,
 * returning how many remain (0 means the list is clean).
 */
1663 priv_flow_verify(struct priv *priv)
1665 struct rte_flow *flow;
1668 TAILQ_FOREACH(flow, &priv->flows, next) {
1669 DEBUG("%p: flow %p still referenced", (void *)priv,
1677 * Enable a control flow configured from the control plane.
1680 * Pointer to Ethernet device.
1682 * An Ethernet flow spec to apply.
1684 * An Ethernet flow mask to apply.
1686 * A VLAN flow spec to apply.
1688 * A VLAN flow mask to apply.
/*
 * Install a control-plane flow (e.g. for MAC/VLAN filtering) on the
 * dedicated ctrl_flows list.  Builds an ETH (+ optional VLAN) pattern and
 * a single QUEUE action at MLX5_CTRL_FLOW_PRIORITY, then delegates to
 * priv_flow_create().  When @vlan_spec is NULL the VLAN item degenerates
 * to an END item, truncating the pattern after ETH.
 */
1694 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
1695 struct rte_flow_item_eth *eth_spec,
1696 struct rte_flow_item_eth *eth_mask,
1697 struct rte_flow_item_vlan *vlan_spec,
1698 struct rte_flow_item_vlan *vlan_mask)
1700 struct priv *priv = dev->data->dev_private;
1701 const struct rte_flow_attr attr = {
1703 .priority = MLX5_CTRL_FLOW_PRIORITY,
1705 struct rte_flow_item items[] = {
1707 .type = RTE_FLOW_ITEM_TYPE_ETH,
/* Second slot is VLAN only when a VLAN spec was supplied. */
1713 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
1714 RTE_FLOW_ITEM_TYPE_END,
1720 .type = RTE_FLOW_ITEM_TYPE_END,
1723 struct rte_flow_action actions[] = {
1725 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
1726 .conf = &(struct rte_flow_action_queue){
1731 .type = RTE_FLOW_ACTION_TYPE_END,
1734 struct rte_flow *flow;
1735 struct rte_flow_error error;
/* Control flows live on their own list, separate from priv->flows. */
1737 flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
1745 * Enable a control flow configured from the control plane.
1748 * Pointer to Ethernet device.
1750 * An Ethernet flow spec to apply.
1752 * An Ethernet flow mask to apply.
/*
 * Convenience wrapper around mlx5_ctrl_flow_vlan() for control flows
 * without a VLAN component.
 */
1758 mlx5_ctrl_flow(struct rte_eth_dev *dev,
1759 struct rte_flow_item_eth *eth_spec,
1760 struct rte_flow_item_eth *eth_mask)
1762 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);