/*-
 *   BSD LICENSE
 *
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for items. */
#define ITEMS(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
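
/*
 * Illustrative expansion (not part of the original code): ITEMS(A, B)
 * produces the compound literal
 * (const enum rte_flow_item_type []){ A, B, RTE_FLOW_ITEM_TYPE_END, },
 * i.e. an END-terminated list suitable for the ".items" fields of the
 * graph defined below.
 */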

/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx4_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/**
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Check support for a given item.
	 *
	 * @param item[in]
	 *   Item specification.
	 * @param mask[in]
	 *   Bit-masks covering supported fields to compare with spec,
	 *   last and mask in \item.
	 * @param size
	 *   Bit-mask size in bytes.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*validate)(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size);
	/**
	 * Conversion function from rte_flow to NIC-specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
};

/** Structure for a drop queue. */
struct rte_flow_drop {
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
};
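
/*
 * Note: a single instance of this structure is allocated by
 * mlx4_flow_create_drop_queue() and shared by every DROP rule on the port.
 */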

/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
	unsigned int i;

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 2;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = eth_size,
	};
	if (!spec) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (!mask)
		mask = default_mask;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
	}
	return 0;
}
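
/*
 * Note on priorities: each convert callback overwrites ibv_attr->priority
 * (2 for L2 above, 1 for L3, 0 for L4 below), so the final value reflects
 * the deepest layer present in the pattern; lower Verbs priority values
 * are matched first, which presumably lets more specific rules win.
 */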

/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
}

/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_ipv4 *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 1;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = ipv4_size,
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	if (!mask)
		mask = default_mask;
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
}

/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = udp_size,
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	if (!mask)
		mask = default_mask;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
}

/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = tcp_size,
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	if (!mask)
		mask = default_mask;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
}
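
/*
 * UDP and TCP conversions share struct ibv_flow_spec_tcp_udp; only the
 * .type field (IBV_FLOW_SPEC_UDP vs. IBV_FLOW_SPEC_TCP) differs.
 */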

/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and
 *   mask in \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_item_validate(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	int ret = 0;

	if (!item->spec && (item->mask || item->last))
		return -1;
	if (item->spec && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->spec;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->last && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->last;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		const uint8_t *apply = mask;
		unsigned int i;

		if (item->mask)
			apply = item->mask;
		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
		}
		ret = memcmp(spec, last, size);
	}
	return ret;
}
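
/*
 * Summary of the checks above: spec/last bytes must stay within the
 * supported bit-mask, and spec..last ranges are only accepted when both
 * bounds are identical once the mask is applied (i.e. no real ranges).
 */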

static int
mlx4_flow_validate_eth(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_eth *mask = item->mask;

		if (mask->dst.addr_bytes[0] != 0xff ||
				mask->dst.addr_bytes[1] != 0xff ||
				mask->dst.addr_bytes[2] != 0xff ||
				mask->dst.addr_bytes[3] != 0xff ||
				mask->dst.addr_bytes[4] != 0xff ||
				mask->dst.addr_bytes[5] != 0xff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_vlan(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_vlan *mask = item->mask;

		if (mask->tci != 0 &&
		    ntohs(mask->tci) != 0x0fff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_ipv4 *mask = item->mask;

		if (mask->hdr.src_addr != 0 &&
		    mask->hdr.src_addr != 0xffffffff)
			return -1;
		if (mask->hdr.dst_addr != 0 &&
		    mask->hdr.dst_addr != 0xffffffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_udp(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_udp *mask = item->mask;

		if (mask->hdr.src_port != 0 &&
		    mask->hdr.src_port != 0xffff)
			return -1;
		if (mask->hdr.dst_port != 0 &&
		    mask->hdr.dst_port != 0xffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_tcp(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_tcp *mask = item->mask;

		if (mask->hdr.src_port != 0 &&
		    mask->hdr.src_port != 0xffff)
			return -1;
		if (mask->hdr.dst_port != 0 &&
		    mask->hdr.dst_port != 0xffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}
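
/*
 * The layer-specific validators above require exact-match masks where a
 * field is used at all: an all-ones destination MAC, a full 0x0fff VLAN
 * TCI, and all-or-nothing IPv4 address and L4 port masks; partial
 * prefixes are rejected before reaching mlx4_flow_item_validate().
 */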

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_items mlx4_flow_items[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_IPV4),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.default_mask = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.validate = mlx4_flow_validate_eth,
		.convert = mlx4_flow_create_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vlan){
			/* rte_flow_item_vlan_mask is invalid for mlx4. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			.tci = 0x0fff,
#else
			.tci = 0xff0f,
#endif
		},
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.validate = mlx4_flow_validate_vlan,
		.convert = mlx4_flow_create_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = -1,
				.dst_addr = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.validate = mlx4_flow_validate_ipv4,
		.convert = mlx4_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.validate = mlx4_flow_validate_udp,
		.convert = mlx4_flow_create_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.validate = mlx4_flow_validate_tcp,
		.convert = mlx4_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};
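
/*
 * Illustrative only: pattern traversal starts at the
 * RTE_FLOW_ITEM_TYPE_END entry of the graph above, so a pattern such as
 * eth / ipv4 / udp (terminated by END) walks ETH -> IPV4 -> UDP, and the
 * rule may end with either a QUEUE or a DROP action.
 */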

/**
 * Make sure a flow rule is supported and initialize the associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct mlx4_flow *flow)
{
	const struct mlx4_flow_items *cur_item = mlx4_flow_items;
	struct mlx4_flow_action action = {
		.queue = 0,
		.drop = 0,
	};

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
	/* Go over items list. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		const struct mlx4_flow_items *token = NULL;
		unsigned int i;
		int err;

		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		/*
		 * The NIC can support patterns with a NULL Ethernet spec
		 * only if Ethernet is the only item in the rule.
		 */
		if (!items->spec &&
		    items->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item *next = items + 1;

			if (next->type != RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   items,
						   "the rule requires"
						   " an Ethernet spec");
				return -rte_errno;
			}
		}
		for (i = 0;
		     cur_item->items &&
		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
		     ++i) {
			if (cur_item->items[i] == items->type) {
				token = &mlx4_flow_items[items->type];
				break;
			}
		}
		if (!token)
			goto exit_item_not_supported;
		cur_item = token;
		err = cur_item->validate(items,
					 (const uint8_t *)cur_item->mask,
					 cur_item->mask_sz);
		if (err)
			goto exit_item_not_supported;
		if (flow->ibv_attr && cur_item->convert) {
			err = cur_item->convert(items,
						(cur_item->default_mask ?
						 cur_item->default_mask :
						 cur_item->mask),
						flow);
			if (err)
				goto exit_item_not_supported;
		}
		flow->offset += cur_item->dst_sz;
	}
	/* Go over actions list. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			action.drop = 1;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (!queue || (queue->index >
				       (priv->dev->data->nb_rx_queues - 1)))
				goto exit_action_not_supported;
			action.queue = 1;
		} else {
			goto exit_action_not_supported;
		}
	}
	if (!action.queue && !action.drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	return 0;
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   items, "item not supported");
	return -rte_errno;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
	return -rte_errno;
}
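
/*
 * Note: this function runs twice per rule; once with flow->ibv_attr == NULL
 * for validation only, and again from mlx4_flow_create() with an allocated
 * buffer so each convert() callback appends its Verbs spec at flow->offset.
 */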

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };

	return mlx4_flow_prepare(priv, attr, items, actions, error, &flow);
}

/**
 * Destroy a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_flow_destroy_drop_queue(struct priv *priv)
{
	if (priv->flow_drop_queue) {
		struct rte_flow_drop *fdq = priv->flow_drop_queue;

		priv->flow_drop_queue = NULL;
		claim_zero(ibv_destroy_qp(fdq->qp));
		claim_zero(ibv_destroy_cq(fdq->cq));
		rte_free(fdq);
	}
}

/**
 * Create a single drop queue for all drop flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_create_drop_queue(struct priv *priv)
{
	struct ibv_qp *qp;
	struct ibv_cq *cq;
	struct rte_flow_drop *fdq;

	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
	if (!fdq) {
		ERROR("Cannot allocate memory for drop struct");
		goto err;
	}
	cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!cq) {
		ERROR("Cannot create drop CQ");
		goto err_create_cq;
	}
	qp = ibv_create_qp(priv->pd,
			   &(struct ibv_qp_init_attr){
				.send_cq = cq,
				.recv_cq = cq,
				.cap = {
					.max_recv_wr = 1,
					.max_recv_sge = 1,
				},
				.qp_type = IBV_QPT_RAW_PACKET,
			   });
	if (!qp) {
		ERROR("Cannot create drop QP");
		goto err_create_qp;
	}
	*fdq = (struct rte_flow_drop){
		.qp = qp,
		.cq = cq,
	};
	priv->flow_drop_queue = fdq;
	return 0;
err_create_qp:
	claim_zero(ibv_destroy_cq(cq));
err_create_cq:
	rte_free(fdq);
err:
	return -1;
}
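
/*
 * Rationale (assumption): ibv_create_flow() always needs a destination QP,
 * so DROP rules are pointed at this placeholder QP, which never posts
 * receive buffers and therefore effectively discards matched packets.
 */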

/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param action
 *   Target action structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created, NULL otherwise.
 */
static struct rte_flow *
mlx4_flow_create_action_queue(struct priv *priv,
			      struct ibv_flow_attr *ibv_attr,
			      struct mlx4_flow_action *action,
			      struct rte_flow_error *error)
{
	struct ibv_qp *qp;
	struct rte_flow *rte_flow;

	assert(priv->pd);
	assert(priv->ctx);
	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	if (action->drop) {
		qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
	} else {
		struct rxq *rxq = priv->dev->data->rx_queues[action->queue_id];

		qp = rxq->qp;
		rte_flow->qp = qp;
	}
	rte_flow->ibv_attr = ibv_attr;
	if (!priv->started)
		return rte_flow;
	rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	rte_free(rte_flow);
	return NULL;
}
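
/*
 * Note: when the port is not started yet (priv->started == 0), the rule is
 * only kept in software; mlx4_flow_start() applies it to Verbs later.
 */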

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *rte_flow;
	struct mlx4_flow_action action;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
	int err;

	err = mlx4_flow_prepare(priv, attr, items, actions, error, &flow);
	if (err)
		return NULL;
	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
	if (!flow.ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
		return NULL;
	}
	flow.offset = sizeof(struct ibv_flow_attr);
	*flow.ibv_attr = (struct ibv_flow_attr){
		.comp_mask = 0,
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_flow_attr),
		.priority = attr->priority,
		.num_of_specs = 0,
		.port = priv->port,
		.flags = 0,
	};
	claim_zero(mlx4_flow_prepare(priv, attr, items, actions,
				     error, &flow));
	action = (struct mlx4_flow_action){
		.queue = 0,
		.drop = 0,
	};
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			action.queue = 1;
			action.queue_id =
				((const struct rte_flow_action_queue *)
				 actions->conf)->index;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			action.drop = 1;
		} else {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions, "unsupported action");
			goto exit;
		}
	}
	rte_flow = mlx4_flow_create_action_queue(priv, flow.ibv_attr,
						 &action, error);
	if (rte_flow) {
		LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
		DEBUG("Flow created %p", (void *)rte_flow);
		return rte_flow;
	}
exit:
	rte_free(flow.ibv_attr);
	return NULL;
}

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
		return 0;
	priv->isolated = !!enable;
	if (enable) {
		mlx4_mac_addr_del(priv);
	} else if (mlx4_mac_addr_add(priv) < 0) {
		priv->isolated = 1;
		return -rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL, "cannot leave isolated mode");
	}
	return 0;
}
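
/*
 * Note: entering isolated mode removes the internal MAC flow
 * (mlx4_mac_addr_del()), so only traffic matching explicit rte_flow rules
 * keeps reaching the port; leaving isolated mode restores that flow.
 */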

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	(void)dev;
	(void)error;
	LIST_REMOVE(flow, next);
	if (flow->ibv_flow)
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
	rte_free(flow->ibv_attr);
	DEBUG("Flow destroyed %p", (void *)flow);
	rte_free(flow);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	while (!LIST_EMPTY(&priv->flows)) {
		struct rte_flow *flow;

		flow = LIST_FIRST(&priv->flows);
		mlx4_flow_destroy(dev, flow, error);
	}
	return 0;
}

/**
 * Remove all flows.
 *
 * Called by dev_stop() to remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_stop(struct priv *priv)
{
	struct rte_flow *flow;

	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		DEBUG("Flow %p removed", (void *)flow);
	}
	mlx4_flow_destroy_drop_queue(priv);
}

/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_start(struct priv *priv)
{
	int ret;
	struct ibv_qp *qp;
	struct rte_flow *flow;

	ret = mlx4_flow_create_drop_queue(priv);
	if (ret)
		return -1;
	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
		flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
		if (!flow->ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
		DEBUG("Flow %p applied", (void *)flow);
	}
	return 0;
}
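
/*
 * Note: rules without a target Rx queue (DROP rules leave flow->qp NULL)
 * are re-attached to the shared drop queue created above.
 */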

static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}