/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Standard headers. */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* DPDK headers. */
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* Generated configuration header. */
#include "mlx4_autoconf.h"

/* PMD headers. */
#include "mlx4_flow.h"
/** Static initializer for a list of subsequent item types. */
#define ITEMS(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx4_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/**
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Check support for a given item.
	 *
	 * @param item[in]
	 *   Item specification.
	 * @param mask[in]
	 *   Bit-masks covering supported fields to compare with spec,
	 *   last and mask in \item.
	 * @param size
	 *   Bit-Mask size in bytes.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*validate)(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size);
	/**
	 * Conversion function from rte_flow to NIC specific flow.
	 *
	 * @param item[in]
	 *   rte_flow item to convert.
	 * @param default_mask[in]
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data[in, out]
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
};
/** Verbs resources backing the single shared drop queue. */
struct rte_flow_drop {
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
};
111 /** Valid action for this PMD. */
112 static const enum rte_flow_action_type valid_actions[] = {
113 RTE_FLOW_ACTION_TYPE_DROP,
114 RTE_FLOW_ACTION_TYPE_QUEUE,
115 RTE_FLOW_ACTION_TYPE_END,
119 * Convert Ethernet item to Verbs specification.
122 * Item specification.
123 * @param default_mask[in]
124 * Default bit-masks to use when item->mask is not provided.
125 * @param data[in, out]
129 mlx4_flow_create_eth(const struct rte_flow_item *item,
130 const void *default_mask,
133 const struct rte_flow_item_eth *spec = item->spec;
134 const struct rte_flow_item_eth *mask = item->mask;
135 struct mlx4_flow *flow = (struct mlx4_flow *)data;
136 struct ibv_flow_spec_eth *eth;
137 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
140 ++flow->ibv_attr->num_of_specs;
141 flow->ibv_attr->priority = 2;
142 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
143 *eth = (struct ibv_flow_spec_eth) {
144 .type = IBV_FLOW_SPEC_ETH,
148 flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
153 memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
154 memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
155 memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
156 memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
157 /* Remove unwanted bits from values. */
158 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
159 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
160 eth->val.src_mac[i] &= eth->mask.src_mac[i];
166 * Convert VLAN item to Verbs specification.
169 * Item specification.
170 * @param default_mask[in]
171 * Default bit-masks to use when item->mask is not provided.
172 * @param data[in, out]
176 mlx4_flow_create_vlan(const struct rte_flow_item *item,
177 const void *default_mask,
180 const struct rte_flow_item_vlan *spec = item->spec;
181 const struct rte_flow_item_vlan *mask = item->mask;
182 struct mlx4_flow *flow = (struct mlx4_flow *)data;
183 struct ibv_flow_spec_eth *eth;
184 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
186 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
191 eth->val.vlan_tag = spec->tci;
192 eth->mask.vlan_tag = mask->tci;
193 eth->val.vlan_tag &= eth->mask.vlan_tag;
198 * Convert IPv4 item to Verbs specification.
201 * Item specification.
202 * @param default_mask[in]
203 * Default bit-masks to use when item->mask is not provided.
204 * @param data[in, out]
208 mlx4_flow_create_ipv4(const struct rte_flow_item *item,
209 const void *default_mask,
212 const struct rte_flow_item_ipv4 *spec = item->spec;
213 const struct rte_flow_item_ipv4 *mask = item->mask;
214 struct mlx4_flow *flow = (struct mlx4_flow *)data;
215 struct ibv_flow_spec_ipv4 *ipv4;
216 unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
218 ++flow->ibv_attr->num_of_specs;
219 flow->ibv_attr->priority = 1;
220 ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
221 *ipv4 = (struct ibv_flow_spec_ipv4) {
222 .type = IBV_FLOW_SPEC_IPV4,
227 ipv4->val = (struct ibv_flow_ipv4_filter) {
228 .src_ip = spec->hdr.src_addr,
229 .dst_ip = spec->hdr.dst_addr,
233 ipv4->mask = (struct ibv_flow_ipv4_filter) {
234 .src_ip = mask->hdr.src_addr,
235 .dst_ip = mask->hdr.dst_addr,
237 /* Remove unwanted bits from values. */
238 ipv4->val.src_ip &= ipv4->mask.src_ip;
239 ipv4->val.dst_ip &= ipv4->mask.dst_ip;
244 * Convert UDP item to Verbs specification.
247 * Item specification.
248 * @param default_mask[in]
249 * Default bit-masks to use when item->mask is not provided.
250 * @param data[in, out]
254 mlx4_flow_create_udp(const struct rte_flow_item *item,
255 const void *default_mask,
258 const struct rte_flow_item_udp *spec = item->spec;
259 const struct rte_flow_item_udp *mask = item->mask;
260 struct mlx4_flow *flow = (struct mlx4_flow *)data;
261 struct ibv_flow_spec_tcp_udp *udp;
262 unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
264 ++flow->ibv_attr->num_of_specs;
265 flow->ibv_attr->priority = 0;
266 udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
267 *udp = (struct ibv_flow_spec_tcp_udp) {
268 .type = IBV_FLOW_SPEC_UDP,
273 udp->val.dst_port = spec->hdr.dst_port;
274 udp->val.src_port = spec->hdr.src_port;
277 udp->mask.dst_port = mask->hdr.dst_port;
278 udp->mask.src_port = mask->hdr.src_port;
279 /* Remove unwanted bits from values. */
280 udp->val.src_port &= udp->mask.src_port;
281 udp->val.dst_port &= udp->mask.dst_port;
286 * Convert TCP item to Verbs specification.
289 * Item specification.
290 * @param default_mask[in]
291 * Default bit-masks to use when item->mask is not provided.
292 * @param data[in, out]
296 mlx4_flow_create_tcp(const struct rte_flow_item *item,
297 const void *default_mask,
300 const struct rte_flow_item_tcp *spec = item->spec;
301 const struct rte_flow_item_tcp *mask = item->mask;
302 struct mlx4_flow *flow = (struct mlx4_flow *)data;
303 struct ibv_flow_spec_tcp_udp *tcp;
304 unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
306 ++flow->ibv_attr->num_of_specs;
307 flow->ibv_attr->priority = 0;
308 tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
309 *tcp = (struct ibv_flow_spec_tcp_udp) {
310 .type = IBV_FLOW_SPEC_TCP,
315 tcp->val.dst_port = spec->hdr.dst_port;
316 tcp->val.src_port = spec->hdr.src_port;
319 tcp->mask.dst_port = mask->hdr.dst_port;
320 tcp->mask.src_port = mask->hdr.src_port;
321 /* Remove unwanted bits from values. */
322 tcp->val.src_port &= tcp->mask.src_port;
323 tcp->val.dst_port &= tcp->mask.dst_port;
328 * Check support for a given item.
331 * Item specification.
333 * Bit-masks covering supported fields to compare with spec, last and mask in
336 * Bit-Mask size in bytes.
339 * 0 on success, negative value otherwise.
342 mlx4_flow_item_validate(const struct rte_flow_item *item,
343 const uint8_t *mask, unsigned int size)
347 if (!item->spec && (item->mask || item->last))
349 if (item->spec && !item->mask) {
351 const uint8_t *spec = item->spec;
353 for (i = 0; i < size; ++i)
354 if ((spec[i] | mask[i]) != mask[i])
357 if (item->last && !item->mask) {
359 const uint8_t *spec = item->last;
361 for (i = 0; i < size; ++i)
362 if ((spec[i] | mask[i]) != mask[i])
365 if (item->spec && item->last) {
368 const uint8_t *apply = mask;
373 for (i = 0; i < size; ++i) {
374 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
375 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
377 ret = memcmp(spec, last, size);
383 mlx4_flow_validate_eth(const struct rte_flow_item *item,
384 const uint8_t *mask, unsigned int size)
387 const struct rte_flow_item_eth *mask = item->mask;
389 if (mask->dst.addr_bytes[0] != 0xff ||
390 mask->dst.addr_bytes[1] != 0xff ||
391 mask->dst.addr_bytes[2] != 0xff ||
392 mask->dst.addr_bytes[3] != 0xff ||
393 mask->dst.addr_bytes[4] != 0xff ||
394 mask->dst.addr_bytes[5] != 0xff)
397 return mlx4_flow_item_validate(item, mask, size);
401 mlx4_flow_validate_vlan(const struct rte_flow_item *item,
402 const uint8_t *mask, unsigned int size)
405 const struct rte_flow_item_vlan *mask = item->mask;
407 if (mask->tci != 0 &&
408 ntohs(mask->tci) != 0x0fff)
411 return mlx4_flow_item_validate(item, mask, size);
415 mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
416 const uint8_t *mask, unsigned int size)
419 const struct rte_flow_item_ipv4 *mask = item->mask;
421 if (mask->hdr.src_addr != 0 &&
422 mask->hdr.src_addr != 0xffffffff)
424 if (mask->hdr.dst_addr != 0 &&
425 mask->hdr.dst_addr != 0xffffffff)
428 return mlx4_flow_item_validate(item, mask, size);
432 mlx4_flow_validate_udp(const struct rte_flow_item *item,
433 const uint8_t *mask, unsigned int size)
436 const struct rte_flow_item_udp *mask = item->mask;
438 if (mask->hdr.src_port != 0 &&
439 mask->hdr.src_port != 0xffff)
441 if (mask->hdr.dst_port != 0 &&
442 mask->hdr.dst_port != 0xffff)
445 return mlx4_flow_item_validate(item, mask, size);
449 mlx4_flow_validate_tcp(const struct rte_flow_item *item,
450 const uint8_t *mask, unsigned int size)
453 const struct rte_flow_item_tcp *mask = item->mask;
455 if (mask->hdr.src_port != 0 &&
456 mask->hdr.src_port != 0xffff)
458 if (mask->hdr.dst_port != 0 &&
459 mask->hdr.dst_port != 0xffff)
462 return mlx4_flow_item_validate(item, mask, size);
465 /** Graph of supported items and associated actions. */
466 static const struct mlx4_flow_items mlx4_flow_items[] = {
467 [RTE_FLOW_ITEM_TYPE_END] = {
468 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
470 [RTE_FLOW_ITEM_TYPE_ETH] = {
471 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
472 RTE_FLOW_ITEM_TYPE_IPV4),
473 .actions = valid_actions,
474 .mask = &(const struct rte_flow_item_eth){
475 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
476 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
478 .default_mask = &rte_flow_item_eth_mask,
479 .mask_sz = sizeof(struct rte_flow_item_eth),
480 .validate = mlx4_flow_validate_eth,
481 .convert = mlx4_flow_create_eth,
482 .dst_sz = sizeof(struct ibv_flow_spec_eth),
484 [RTE_FLOW_ITEM_TYPE_VLAN] = {
485 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
486 .actions = valid_actions,
487 .mask = &(const struct rte_flow_item_vlan){
488 /* rte_flow_item_vlan_mask is invalid for mlx4. */
489 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
495 .mask_sz = sizeof(struct rte_flow_item_vlan),
496 .validate = mlx4_flow_validate_vlan,
497 .convert = mlx4_flow_create_vlan,
500 [RTE_FLOW_ITEM_TYPE_IPV4] = {
501 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
502 RTE_FLOW_ITEM_TYPE_TCP),
503 .actions = valid_actions,
504 .mask = &(const struct rte_flow_item_ipv4){
510 .default_mask = &rte_flow_item_ipv4_mask,
511 .mask_sz = sizeof(struct rte_flow_item_ipv4),
512 .validate = mlx4_flow_validate_ipv4,
513 .convert = mlx4_flow_create_ipv4,
514 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
516 [RTE_FLOW_ITEM_TYPE_UDP] = {
517 .actions = valid_actions,
518 .mask = &(const struct rte_flow_item_udp){
524 .default_mask = &rte_flow_item_udp_mask,
525 .mask_sz = sizeof(struct rte_flow_item_udp),
526 .validate = mlx4_flow_validate_udp,
527 .convert = mlx4_flow_create_udp,
528 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
530 [RTE_FLOW_ITEM_TYPE_TCP] = {
531 .actions = valid_actions,
532 .mask = &(const struct rte_flow_item_tcp){
538 .default_mask = &rte_flow_item_tcp_mask,
539 .mask_sz = sizeof(struct rte_flow_item_tcp),
540 .validate = mlx4_flow_validate_tcp,
541 .convert = mlx4_flow_create_tcp,
542 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
547 * Validate a flow supported by the NIC.
550 * Pointer to private structure.
552 * Flow rule attributes.
554 * Pattern specification (list terminated by the END pattern item).
556 * Associated actions (list terminated by the END action).
558 * Perform verbose error reporting if not NULL.
559 * @param[in, out] flow
560 * Flow structure to update.
563 * 0 on success, a negative errno value otherwise and rte_errno is set.
566 priv_flow_validate(struct priv *priv,
567 const struct rte_flow_attr *attr,
568 const struct rte_flow_item items[],
569 const struct rte_flow_action actions[],
570 struct rte_flow_error *error,
571 struct mlx4_flow *flow)
573 const struct mlx4_flow_items *cur_item = mlx4_flow_items;
574 struct mlx4_flow_action action = {
581 rte_flow_error_set(error, ENOTSUP,
582 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
584 "groups are not supported");
587 if (attr->priority) {
588 rte_flow_error_set(error, ENOTSUP,
589 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
591 "priorities are not supported");
595 rte_flow_error_set(error, ENOTSUP,
596 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
598 "egress is not supported");
601 if (!attr->ingress) {
602 rte_flow_error_set(error, ENOTSUP,
603 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
605 "only ingress is supported");
608 /* Go over items list. */
609 for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
610 const struct mlx4_flow_items *token = NULL;
614 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
617 * The nic can support patterns with NULL eth spec only
618 * if eth is a single item in a rule.
621 items->type == RTE_FLOW_ITEM_TYPE_ETH) {
622 const struct rte_flow_item *next = items + 1;
624 if (next->type != RTE_FLOW_ITEM_TYPE_END) {
625 rte_flow_error_set(error, ENOTSUP,
626 RTE_FLOW_ERROR_TYPE_ITEM,
629 " an Ethernet spec");
635 cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
637 if (cur_item->items[i] == items->type) {
638 token = &mlx4_flow_items[items->type];
643 goto exit_item_not_supported;
645 err = cur_item->validate(items,
646 (const uint8_t *)cur_item->mask,
649 goto exit_item_not_supported;
650 if (flow->ibv_attr && cur_item->convert) {
651 err = cur_item->convert(items,
652 (cur_item->default_mask ?
653 cur_item->default_mask :
657 goto exit_item_not_supported;
659 flow->offset += cur_item->dst_sz;
661 /* Go over actions list */
662 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
663 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
665 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
667 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
668 const struct rte_flow_action_queue *queue =
669 (const struct rte_flow_action_queue *)
672 if (!queue || (queue->index > (priv->rxqs_n - 1)))
673 goto exit_action_not_supported;
676 goto exit_action_not_supported;
679 if (!action.queue && !action.drop) {
680 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
681 NULL, "no valid action");
685 exit_item_not_supported:
686 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
687 items, "item not supported");
689 exit_action_not_supported:
690 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
691 actions, "action not supported");
696 * Validate a flow supported by the NIC.
698 * @see rte_flow_validate()
702 mlx4_flow_validate(struct rte_eth_dev *dev,
703 const struct rte_flow_attr *attr,
704 const struct rte_flow_item items[],
705 const struct rte_flow_action actions[],
706 struct rte_flow_error *error)
708 struct priv *priv = dev->data->dev_private;
710 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
713 ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
719 * Destroy a drop queue.
722 * Pointer to private structure.
725 mlx4_flow_destroy_drop_queue(struct priv *priv)
727 if (priv->flow_drop_queue) {
728 struct rte_flow_drop *fdq = priv->flow_drop_queue;
730 priv->flow_drop_queue = NULL;
731 claim_zero(ibv_destroy_qp(fdq->qp));
732 claim_zero(ibv_destroy_cq(fdq->cq));
738 * Create a single drop queue for all drop flows.
741 * Pointer to private structure.
744 * 0 on success, negative value otherwise.
747 mlx4_flow_create_drop_queue(struct priv *priv)
751 struct rte_flow_drop *fdq;
753 fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
755 ERROR("Cannot allocate memory for drop struct");
758 cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
759 &(struct ibv_exp_cq_init_attr){
763 ERROR("Cannot create drop CQ");
766 qp = ibv_exp_create_qp(priv->ctx,
767 &(struct ibv_exp_qp_init_attr){
774 .qp_type = IBV_QPT_RAW_PACKET,
776 IBV_EXP_QP_INIT_ATTR_PD |
777 IBV_EXP_QP_INIT_ATTR_PORT,
779 .port_num = priv->port,
782 ERROR("Cannot create drop QP");
785 *fdq = (struct rte_flow_drop){
789 priv->flow_drop_queue = fdq;
792 claim_zero(ibv_destroy_cq(cq));
800 * Complete flow rule creation.
803 * Pointer to private structure.
805 * Verbs flow attributes.
807 * Target action structure.
809 * Perform verbose error reporting if not NULL.
812 * A flow if the rule could be created.
814 static struct rte_flow *
815 priv_flow_create_action_queue(struct priv *priv,
816 struct ibv_flow_attr *ibv_attr,
817 struct mlx4_flow_action *action,
818 struct rte_flow_error *error)
821 struct rte_flow *rte_flow;
825 rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
827 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
828 NULL, "cannot allocate flow memory");
832 qp = priv->flow_drop_queue->qp;
834 struct rxq *rxq = (*priv->rxqs)[action->queue_id];
839 rte_flow->ibv_attr = ibv_attr;
840 rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
841 if (!rte_flow->ibv_flow) {
842 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
843 NULL, "flow rule creation failure");
857 * Pointer to private structure.
859 * Flow rule attributes.
861 * Pattern specification (list terminated by the END pattern item).
863 * Associated actions (list terminated by the END action).
865 * Perform verbose error reporting if not NULL.
868 * A flow on success, NULL otherwise.
870 static struct rte_flow *
871 priv_flow_create(struct priv *priv,
872 const struct rte_flow_attr *attr,
873 const struct rte_flow_item items[],
874 const struct rte_flow_action actions[],
875 struct rte_flow_error *error)
877 struct rte_flow *rte_flow;
878 struct mlx4_flow_action action;
879 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
882 err = priv_flow_validate(priv, attr, items, actions, error, &flow);
885 flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
886 if (!flow.ibv_attr) {
887 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
888 NULL, "cannot allocate ibv_attr memory");
891 flow.offset = sizeof(struct ibv_flow_attr);
892 *flow.ibv_attr = (struct ibv_flow_attr){
894 .type = IBV_FLOW_ATTR_NORMAL,
895 .size = sizeof(struct ibv_flow_attr),
896 .priority = attr->priority,
901 claim_zero(priv_flow_validate(priv, attr, items, actions,
903 action = (struct mlx4_flow_action){
907 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
908 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
910 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
913 ((const struct rte_flow_action_queue *)
914 actions->conf)->index;
915 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
918 rte_flow_error_set(error, ENOTSUP,
919 RTE_FLOW_ERROR_TYPE_ACTION,
920 actions, "unsupported action");
924 rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
929 rte_free(flow.ibv_attr);
936 * @see rte_flow_create()
940 mlx4_flow_create(struct rte_eth_dev *dev,
941 const struct rte_flow_attr *attr,
942 const struct rte_flow_item items[],
943 const struct rte_flow_action actions[],
944 struct rte_flow_error *error)
946 struct priv *priv = dev->data->dev_private;
947 struct rte_flow *flow;
950 flow = priv_flow_create(priv, attr, items, actions, error);
952 LIST_INSERT_HEAD(&priv->flows, flow, next);
953 DEBUG("Flow created %p", (void *)flow);
963 * Pointer to private structure.
968 priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
971 LIST_REMOVE(flow, next);
973 claim_zero(ibv_destroy_flow(flow->ibv_flow));
974 rte_free(flow->ibv_attr);
975 DEBUG("Flow destroyed %p", (void *)flow);
982 * @see rte_flow_destroy()
986 mlx4_flow_destroy(struct rte_eth_dev *dev,
987 struct rte_flow *flow,
988 struct rte_flow_error *error)
990 struct priv *priv = dev->data->dev_private;
994 priv_flow_destroy(priv, flow);
1000 * Destroy all flows.
1003 * Pointer to private structure.
1006 priv_flow_flush(struct priv *priv)
1008 while (!LIST_EMPTY(&priv->flows)) {
1009 struct rte_flow *flow;
1011 flow = LIST_FIRST(&priv->flows);
1012 priv_flow_destroy(priv, flow);
1017 * Destroy all flows.
1019 * @see rte_flow_flush()
1023 mlx4_flow_flush(struct rte_eth_dev *dev,
1024 struct rte_flow_error *error)
1026 struct priv *priv = dev->data->dev_private;
1030 priv_flow_flush(priv);
1038 * Called by dev_stop() to remove all flows.
1041 * Pointer to private structure.
1044 mlx4_priv_flow_stop(struct priv *priv)
1046 struct rte_flow *flow;
1048 for (flow = LIST_FIRST(&priv->flows);
1050 flow = LIST_NEXT(flow, next)) {
1051 claim_zero(ibv_destroy_flow(flow->ibv_flow));
1052 flow->ibv_flow = NULL;
1053 DEBUG("Flow %p removed", (void *)flow);
1055 mlx4_flow_destroy_drop_queue(priv);
1062 * Pointer to private structure.
1065 * 0 on success, a errno value otherwise and rte_errno is set.
1068 mlx4_priv_flow_start(struct priv *priv)
1072 struct rte_flow *flow;
1074 ret = mlx4_flow_create_drop_queue(priv);
1077 for (flow = LIST_FIRST(&priv->flows);
1079 flow = LIST_NEXT(flow, next)) {
1080 qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
1081 flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
1082 if (!flow->ibv_flow) {
1083 DEBUG("Flow %p cannot be applied", (void *)flow);
1087 DEBUG("Flow %p applied", (void *)flow);