/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

#include <infiniband/verbs.h>

#include <rte_errno.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for items. */
#define ITEMS(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }
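
/*
 * Illustration (not part of the driver): ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
 * RTE_FLOW_ITEM_TYPE_IPV4) expands to the compound literal
 *
 *      (const enum rte_flow_item_type []){
 *              RTE_FLOW_ITEM_TYPE_VLAN,
 *              RTE_FLOW_ITEM_TYPE_IPV4,
 *              RTE_FLOW_ITEM_TYPE_END,
 *      }
 *
 * i.e. an anonymous array always terminated by the END item.
 */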

/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx4_flow_items {
        /** List of possible actions for these items. */
        const enum rte_flow_action_type *const actions;
        /** Bit-masks corresponding to the possibilities for the item. */
        const void *mask;
        /**
         * Default bit-masks to use when item->mask is not provided. When
         * \default_mask is also NULL, the full supported bit-mask (\mask) is
         * used instead.
         */
        const void *default_mask;
        /** Bit-mask size in bytes. */
        const unsigned int mask_sz;
        /**
         * Check support for a given item.
         *
         * @param item[in]
         *   Item specification.
         * @param mask[in]
         *   Bit-masks covering supported fields to compare with spec,
         *   last and mask in \item.
         * @param size
         *   Bit-mask size in bytes.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*validate)(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size);
        /**
         * Conversion function from rte_flow to NIC specific flow.
         *
         * @param item[in]
         *   rte_flow item to convert.
         * @param default_mask[in]
         *   Default bit-masks to use when item->mask is not provided.
         * @param data[in, out]
         *   Internal structure to store the conversion.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*convert)(const struct rte_flow_item *item,
                       const void *default_mask,
                       void *data);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible following items. */
        const enum rte_flow_item_type *const items;
};

/** Structure backing the single drop queue. */
struct rte_flow_drop {
        struct ibv_qp *qp; /**< Verbs queue pair. */
        struct ibv_cq *cq; /**< Verbs completion queue. */
};

/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_eth(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct mlx4_flow *flow = (struct mlx4_flow *)data;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
        unsigned int i;

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 2;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *eth = (struct ibv_flow_spec_eth) {
                .type = IBV_FLOW_SPEC_ETH,
                .size = eth_size,
        };
        if (!spec) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
                return 0;
        }
        if (!mask)
                mask = default_mask;
        memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
        /* Remove unwanted bits from values. */
        for (i = 0; i < ETHER_ADDR_LEN; ++i) {
                eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
                eth->val.src_mac[i] &= eth->mask.src_mac[i];
        }
        return 0;
}
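
/*
 * Layout note (illustrative): every convert callback writes its Verbs
 * specification at flow->offset, inside the single buffer that starts with
 * struct ibv_flow_attr. For an ETH / IPV4 / UDP pattern the buffer thus
 * ends up as:
 *
 *      ibv_flow_attr | ibv_flow_spec_eth | ibv_flow_spec_ipv4 |
 *      ibv_flow_spec_tcp_udp
 *
 * with flow->offset advanced by each item's dst_sz in between.
 */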

/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_vlan(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct mlx4_flow *flow = (struct mlx4_flow *)data;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

        eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        eth->val.vlan_tag = spec->tci;
        eth->mask.vlan_tag = mask->tci;
        eth->val.vlan_tag &= eth->mask.vlan_tag;
        return 0;
}

/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct mlx4_flow *flow = (struct mlx4_flow *)data;
        struct ibv_flow_spec_ipv4 *ipv4;
        unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 1;
        ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *ipv4 = (struct ibv_flow_spec_ipv4) {
                .type = IBV_FLOW_SPEC_IPV4,
                .size = ipv4_size,
        };
        if (!spec)
                return 0;
        ipv4->val = (struct ibv_flow_ipv4_filter) {
                .src_ip = spec->hdr.src_addr,
                .dst_ip = spec->hdr.dst_addr,
        };
        if (!mask)
                mask = default_mask;
        ipv4->mask = (struct ibv_flow_ipv4_filter) {
                .src_ip = mask->hdr.src_addr,
                .dst_ip = mask->hdr.dst_addr,
        };
        /* Remove unwanted bits from values. */
        ipv4->val.src_ip &= ipv4->mask.src_ip;
        ipv4->val.dst_ip &= ipv4->mask.dst_ip;
        return 0;
}

/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_udp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct mlx4_flow *flow = (struct mlx4_flow *)data;
        struct ibv_flow_spec_tcp_udp *udp;
        unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *udp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_UDP,
                .size = udp_size,
        };
        if (!spec)
                return 0;
        udp->val.dst_port = spec->hdr.dst_port;
        udp->val.src_port = spec->hdr.src_port;
        if (!mask)
                mask = default_mask;
        udp->mask.dst_port = mask->hdr.dst_port;
        udp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        udp->val.src_port &= udp->mask.src_port;
        udp->val.dst_port &= udp->mask.dst_port;
        return 0;
}

/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_tcp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct mlx4_flow *flow = (struct mlx4_flow *)data;
        struct ibv_flow_spec_tcp_udp *tcp;
        unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *tcp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_TCP,
                .size = tcp_size,
        };
        if (!spec)
                return 0;
        tcp->val.dst_port = spec->hdr.dst_port;
        tcp->val.src_port = spec->hdr.src_port;
        if (!mask)
                mask = default_mask;
        tcp->mask.dst_port = mask->hdr.dst_port;
        tcp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        tcp->val.src_port &= tcp->mask.src_port;
        tcp->val.dst_port &= tcp->mask.dst_port;
        return 0;
}

/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_item_validate(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        int ret = 0;

        if (!item->spec && (item->mask || item->last))
                return -1;
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = mask;
                unsigned int i;

                if (item->mask)
                        apply = item->mask;
                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
        }
        return ret;
}
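
/*
 * Example of the spec/last handling above (hypothetical values): a UDP
 * item with spec->hdr.dst_port = 53 and last->hdr.dst_port = 53 passes,
 * since both compare equal once masked, while a genuine range such as
 * 53..60 makes the final memcmp() fail; this PMD cannot offload ranges.
 */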

static int
mlx4_flow_validate_eth(const struct rte_flow_item *item,
                       const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_eth *mask = item->mask;

                if (mask->dst.addr_bytes[0] != 0xff ||
                    mask->dst.addr_bytes[1] != 0xff ||
                    mask->dst.addr_bytes[2] != 0xff ||
                    mask->dst.addr_bytes[3] != 0xff ||
                    mask->dst.addr_bytes[4] != 0xff ||
                    mask->dst.addr_bytes[5] != 0xff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_vlan(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_vlan *mask = item->mask;

                if (mask->tci != 0 &&
                    ntohs(mask->tci) != 0x0fff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_ipv4 *mask = item->mask;

                if (mask->hdr.src_addr != 0 &&
                    mask->hdr.src_addr != 0xffffffff)
                        return -1;
                if (mask->hdr.dst_addr != 0 &&
                    mask->hdr.dst_addr != 0xffffffff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_udp(const struct rte_flow_item *item,
                       const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_udp *mask = item->mask;

                if (mask->hdr.src_port != 0 &&
                    mask->hdr.src_port != 0xffff)
                        return -1;
                if (mask->hdr.dst_port != 0 &&
                    mask->hdr.dst_port != 0xffff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_tcp(const struct rte_flow_item *item,
                       const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_tcp *mask = item->mask;

                if (mask->hdr.src_port != 0 &&
                    mask->hdr.src_port != 0xffff)
                        return -1;
                if (mask->hdr.dst_port != 0 &&
                    mask->hdr.dst_port != 0xffff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_items mlx4_flow_items[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_IPV4),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                },
                .default_mask = &rte_flow_item_eth_mask,
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .validate = mlx4_flow_validate_eth,
                .convert = mlx4_flow_create_eth,
                .dst_sz = sizeof(struct ibv_flow_spec_eth),
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_vlan){
                        /* rte_flow_item_vlan_mask is invalid for mlx4. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        .tci = 0x0fff,
#else
                        .tci = 0xff0f,
#endif
                },
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .validate = mlx4_flow_validate_vlan,
                .convert = mlx4_flow_create_vlan,
                .dst_sz = 0,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = -1,
                                .dst_addr = -1,
                        },
                },
                .default_mask = &rte_flow_item_ipv4_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .validate = mlx4_flow_validate_ipv4,
                .convert = mlx4_flow_create_ipv4,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .default_mask = &rte_flow_item_udp_mask,
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .validate = mlx4_flow_validate_udp,
                .convert = mlx4_flow_create_udp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .default_mask = &rte_flow_item_tcp_mask,
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .validate = mlx4_flow_validate_tcp,
                .convert = mlx4_flow_create_tcp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
};
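
/*
 * For reference, the graph above accepts the following item sequences
 * (END terminator omitted):
 *
 *      ETH
 *      ETH -> VLAN
 *      ETH -> IPV4
 *      ETH -> IPV4 -> UDP
 *      ETH -> IPV4 -> TCP
 *      ETH -> VLAN -> IPV4
 *      ETH -> VLAN -> IPV4 -> UDP
 *      ETH -> VLAN -> IPV4 -> TCP
 */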

/**
 * Validate a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_validate(struct priv *priv,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item items[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error,
                   struct mlx4_flow *flow)
{
        const struct mlx4_flow_items *cur_item = mlx4_flow_items;
        struct mlx4_flow_action action = {
                .queue = 0,
                .drop = 0,
        };

        if (attr->group) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   NULL,
                                   "groups are not supported");
                return -rte_errno;
        }
        if (attr->priority) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   NULL,
                                   "priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   NULL,
                                   "egress is not supported");
                return -rte_errno;
        }
        if (!attr->ingress) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   NULL,
                                   "only ingress is supported");
                return -rte_errno;
        }
        /* Go over items list. */
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                const struct mlx4_flow_items *token = NULL;
                unsigned int i;
                int err;

                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                /*
                 * The NIC can support patterns with NULL Ethernet spec
                 * only if eth is a single item in a rule.
                 */
                if (!items->spec &&
                    items->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        const struct rte_flow_item *next = items + 1;

                        if (next->type != RTE_FLOW_ITEM_TYPE_END) {
                                rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   items,
                                                   "the rule requires"
                                                   " an Ethernet spec");
                                return -rte_errno;
                        }
                }
                for (i = 0;
                     cur_item->items &&
                     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
                     ++i) {
                        if (cur_item->items[i] == items->type) {
                                token = &mlx4_flow_items[items->type];
                                break;
                        }
                }
                if (!token)
                        goto exit_item_not_supported;
                cur_item = token;
                err = cur_item->validate(items,
                                         (const uint8_t *)cur_item->mask,
                                         cur_item->mask_sz);
                if (err)
                        goto exit_item_not_supported;
                if (flow->ibv_attr && cur_item->convert) {
                        err = cur_item->convert(items,
                                                (cur_item->default_mask ?
                                                 cur_item->default_mask :
                                                 cur_item->mask),
                                                flow);
                        if (err)
                                goto exit_item_not_supported;
                }
                flow->offset += cur_item->dst_sz;
        }
        /* Go over actions list. */
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        action.drop = 1;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (!queue || (queue->index > (priv->rxqs_n - 1)))
                                goto exit_action_not_supported;
                        action.queue = 1;
                } else {
                        goto exit_action_not_supported;
                }
        }
        if (!action.queue && !action.drop) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "no valid action");
                return -rte_errno;
        }
        return 0;
exit_item_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                           items, "item not supported");
        return -rte_errno;
exit_action_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                           actions, "action not supported");
        return -rte_errno;
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx4_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item items[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };

        return priv_flow_validate(priv, attr, items, actions, error, &flow);
}

/**
 * Destroy a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_flow_destroy_drop_queue(struct priv *priv)
{
        if (priv->flow_drop_queue) {
                struct rte_flow_drop *fdq = priv->flow_drop_queue;

                priv->flow_drop_queue = NULL;
                claim_zero(ibv_destroy_qp(fdq->qp));
                claim_zero(ibv_destroy_cq(fdq->cq));
                rte_free(fdq);
        }
}

/**
 * Create a single drop queue for all drop flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_create_drop_queue(struct priv *priv)
{
        struct ibv_qp *qp;
        struct ibv_cq *cq;
        struct rte_flow_drop *fdq;

        fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
        if (!fdq) {
                ERROR("Cannot allocate memory for drop struct");
                goto err;
        }
        cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!cq) {
                ERROR("Cannot create drop CQ");
                goto err_create_cq;
        }
        qp = ibv_create_qp(priv->pd,
                           &(struct ibv_qp_init_attr){
                                .send_cq = cq,
                                .recv_cq = cq,
                                .cap = {
                                        .max_recv_wr = 1,
                                        .max_recv_sge = 1,
                                },
                                .qp_type = IBV_QPT_RAW_PACKET,
                           });
        if (!qp) {
                ERROR("Cannot create drop QP");
                goto err_create_qp;
        }
        *fdq = (struct rte_flow_drop){
                .qp = qp,
                .cq = cq,
        };
        priv->flow_drop_queue = fdq;
        return 0;
err_create_qp:
        claim_zero(ibv_destroy_cq(cq));
err_create_cq:
        rte_free(fdq);
err:
        return -1;
}
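
/*
 * Design note: a single CQ/QP pair backs every DROP rule. No receive work
 * request is ever posted on this QP, so matched packets are simply
 * discarded by the hardware, and resource usage stays constant no matter
 * how many drop flows exist.
 */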

/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param action
 *   Target action structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
                              struct ibv_flow_attr *ibv_attr,
                              struct mlx4_flow_action *action,
                              struct rte_flow_error *error)
{
        struct ibv_qp *qp;
        struct rte_flow *rte_flow;

        assert(priv->pd);
        assert(priv->ctx);
        rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
        if (!rte_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }
        if (action->drop) {
                qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
        } else {
                struct rxq *rxq = (*priv->rxqs)[action->queue_id];

                qp = rxq->qp;
                rte_flow->qp = qp;
        }
        rte_flow->ibv_attr = ibv_attr;
        if (!priv->started)
                return rte_flow;
        rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
        if (!rte_flow->ibv_flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "flow rule creation failure");
                goto error;
        }
        return rte_flow;
error:
        rte_free(rte_flow);
        return NULL;
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *rte_flow;
        struct mlx4_flow_action action;
        struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
        int err;

        err = priv_flow_validate(priv, attr, items, actions, error, &flow);
        if (err)
                return NULL;
        flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
        if (!flow.ibv_attr) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate ibv_attr memory");
                return NULL;
        }
        flow.offset = sizeof(struct ibv_flow_attr);
        *flow.ibv_attr = (struct ibv_flow_attr){
                .comp_mask = 0,
                .type = IBV_FLOW_ATTR_NORMAL,
                .size = sizeof(struct ibv_flow_attr),
                .priority = attr->priority,
                .num_of_specs = 0,
                .port = priv->port,
                .flags = 0,
        };
        claim_zero(priv_flow_validate(priv, attr, items, actions,
                                      error, &flow));
        action = (struct mlx4_flow_action){
                .queue = 0,
                .drop = 0,
        };
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        action.queue = 1;
                        action.queue_id =
                                ((const struct rte_flow_action_queue *)
                                 actions->conf)->index;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        action.drop = 1;
                } else {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           actions, "unsupported action");
                        goto exit;
                }
        }
        rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
                                                 &action, error);
        if (rte_flow) {
                LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
                DEBUG("Flow created %p", (void *)rte_flow);
                return rte_flow;
        }
exit:
        rte_free(flow.ibv_attr);
        return NULL;
}
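
/*
 * Illustrative application-side usage (hypothetical port_id and queue
 * index), building a rule the code above accepts: UDP over IPv4 over
 * Ethernet, matching on destination MAC only and steered to Rx queue 3.
 *
 *      struct rte_flow_error err;
 *      struct rte_flow_item_eth eth_spec = {
 *              .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *      };
 *      struct rte_flow_item_eth eth_mask = {
 *              .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &eth_spec, .mask = &eth_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              {
 *                      .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *                      .conf = &(struct rte_flow_action_queue){ .index = 3 },
 *              },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow *f =
 *              rte_flow_create(port_id,
 *                              &(struct rte_flow_attr){ .ingress = 1 },
 *                              pattern, actions, &err);
 */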

/**
 * @see rte_flow_isolate()
 *
 * Must be done before calling dev_configure().
 *
 * @param dev
 *   Pointer to the ethernet device structure.
 * @param enable
 *   Nonzero to enter isolated mode, attempt to leave it otherwise.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value on error.
 */
int
mlx4_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        if (priv->rxqs) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "isolated mode must be set"
                                   " before configuring the device");
                return -rte_errno;
        }
        priv->isolated = !!enable;
        return 0;
}
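
/*
 * Illustrative call order (hypothetical port_id and configuration
 * variables): isolated mode has to be requested before the device is
 * configured.
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_isolate(port_id, 1, &err))
 *              ERROR("cannot enter isolated mode: %s", err.message);
 *      else
 *              rte_eth_dev_configure(port_id, rxqs_n, txqs_n, &dev_conf);
 */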

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx4_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        (void)dev;
        (void)error;
        LIST_REMOVE(flow, next);
        if (flow->ibv_flow)
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
        rte_free(flow->ibv_attr);
        DEBUG("Flow destroyed %p", (void *)flow);
        rte_free(flow);
        return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx4_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        while (!LIST_EMPTY(&priv->flows)) {
                struct rte_flow *flow;

                flow = LIST_FIRST(&priv->flows);
                mlx4_flow_destroy(dev, flow, error);
        }
        return 0;
}

/**
 * Remove all flows.
 *
 * Called by dev_stop() to remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_priv_flow_stop(struct priv *priv)
{
        struct rte_flow *flow;

        for (flow = LIST_FIRST(&priv->flows);
             flow;
             flow = LIST_NEXT(flow, next)) {
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
                flow->ibv_flow = NULL;
                DEBUG("Flow %p removed", (void *)flow);
        }
        mlx4_flow_destroy_drop_queue(priv);
}

/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
mlx4_priv_flow_start(struct priv *priv)
{
        int ret;
        struct ibv_qp *qp;
        struct rte_flow *flow;

        ret = mlx4_flow_create_drop_queue(priv);
        if (ret)
                return -1;
        for (flow = LIST_FIRST(&priv->flows);
             flow;
             flow = LIST_NEXT(flow, next)) {
                qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
                flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
                if (!flow->ibv_flow) {
                        DEBUG("Flow %p cannot be applied", (void *)flow);
                        rte_errno = EINVAL;
                        return rte_errno;
                }
                DEBUG("Flow %p applied", (void *)flow);
        }
        return 0;
}
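
/*
 * Design note: mlx4_priv_flow_stop() destroys only the Verbs flow objects
 * and keeps each rule's ibv_attr buffer, which is what allows
 * mlx4_priv_flow_start() to replay the whole list across a
 * dev_stop()/dev_start() cycle without re-validating the rules.
 */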