/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
37 #include <rte_flow_driver.h>
38 #include <rte_malloc.h>
40 /* Generated configuration header. */
41 #include "mlx4_autoconf.h"
45 #include "mlx4_flow.h"
47 /** Static initializer for items. */
/*
 * NOTE(review): the "#define ITEMS(...)" line itself is missing from this
 * extract; the visible body builds a compound-literal array of item types
 * terminated by RTE_FLOW_ITEM_TYPE_END.
 */
49 (const enum rte_flow_item_type []){ \
50 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
53 /** Structure to generate a simple graph of layers supported by the NIC. */
54 struct mlx4_flow_items {
55 /** List of possible actions for these items. */
56 const enum rte_flow_action_type *const actions;
57 /** Bit-masks corresponding to the possibilities for the item. */
/* NOTE(review): the "const void *mask;" member declaration appears to be
 * missing from this extract (its doc comment is visible above/below). */
60 * Default bit-masks to use when item->mask is not provided. When
61 * \default_mask is also NULL, the full supported bit-mask (\mask) is
64 const void *default_mask;
65 /** Bit-masks size in bytes. */
66 const unsigned int mask_sz;
68 * Check support for a given item.
73 * Bit-masks covering supported fields to compare with spec,
77 * Bit-Mask size in bytes.
80 * 0 on success, negative value otherwise.
82 int (*validate)(const struct rte_flow_item *item,
83 const uint8_t *mask, unsigned int size);
85 * Conversion function from rte_flow to NIC specific flow.
88 * rte_flow item to convert.
90 * Default bit-masks to use when item->mask is not provided.
92 * Internal structure to store the conversion.
95 * 0 on success, negative value otherwise.
/* NOTE(review): convert()'s trailing "void *data" parameter line is
 * missing here — callers below pass flow->ibv_attr storage through it. */
97 int (*convert)(const struct rte_flow_item *item,
98 const void *default_mask,
100 /** Size in bytes of the destination structure. */
101 const unsigned int dst_sz;
102 /** List of possible following items (graph edges). */
103 const enum rte_flow_item_type *const items;
106 /** Valid action for this PMD. */
/* Only DROP and QUEUE terminal actions are supported by this driver;
 * the list is END-terminated for iteration. */
107 static const enum rte_flow_action_type valid_actions[] = {
108 RTE_FLOW_ACTION_TYPE_DROP,
109 RTE_FLOW_ACTION_TYPE_QUEUE,
110 RTE_FLOW_ACTION_TYPE_END,
114 * Convert Ethernet item to Verbs specification.
117 * Item specification.
118 * @param default_mask[in]
119 * Default bit-masks to use when item->mask is not provided.
120 * @param data[in, out]
/* Writes an ibv_flow_spec_eth at offset flow->offset inside the
 * ibv_flow_attr buffer carried by @data, then ANDs spec values with the
 * mask so only wanted bits remain. */
124 mlx4_flow_create_eth(const struct rte_flow_item *item,
125 const void *default_mask,
128 const struct rte_flow_item_eth *spec = item->spec;
129 const struct rte_flow_item_eth *mask = item->mask;
130 struct mlx4_flow *flow = (struct mlx4_flow *)data;
131 struct ibv_flow_spec_eth *eth;
132 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
135 ++flow->ibv_attr->num_of_specs;
/* L2-only match: lowest precision, hence highest priority value (2). */
136 flow->ibv_attr->priority = 2;
137 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
138 *eth = (struct ibv_flow_spec_eth) {
139 .type = IBV_FLOW_SPEC_ETH,
/* NOTE(review): lines handling a NULL spec (promiscuous-like match via
 * IBV_FLOW_ATTR_ALL_DEFAULT) and the fallback of mask to default_mask
 * appear to be missing from this extract — confirm against upstream. */
143 flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
148 memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
149 memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
150 memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
151 memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
152 /* Remove unwanted bits from values. */
153 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
154 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
155 eth->val.src_mac[i] &= eth->mask.src_mac[i];
161 * Convert VLAN item to Verbs specification.
164 * Item specification.
165 * @param default_mask[in]
166 * Default bit-masks to use when item->mask is not provided.
167 * @param data[in, out]
/* VLAN has no spec of its own in Verbs: it patches the vlan_tag field of
 * the ibv_flow_spec_eth written by the preceding ETH item. */
171 mlx4_flow_create_vlan(const struct rte_flow_item *item,
172 const void *default_mask,
175 const struct rte_flow_item_vlan *spec = item->spec;
176 const struct rte_flow_item_vlan *mask = item->mask;
177 struct mlx4_flow *flow = (struct mlx4_flow *)data;
178 struct ibv_flow_spec_eth *eth;
179 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
/* Step back over the ETH spec just emitted (offset was already advanced). */
181 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
/* NOTE(review): the NULL-spec early return and mask fallback lines are
 * missing from this extract. */
186 eth->val.vlan_tag = spec->tci;
187 eth->mask.vlan_tag = mask->tci;
188 eth->val.vlan_tag &= eth->mask.vlan_tag;
193 * Convert IPv4 item to Verbs specification.
196 * Item specification.
197 * @param default_mask[in]
198 * Default bit-masks to use when item->mask is not provided.
199 * @param data[in, out]
/* Writes an ibv_flow_spec_ipv4 at flow->offset; matches on src/dst
 * addresses only (no other header fields are copied here). */
203 mlx4_flow_create_ipv4(const struct rte_flow_item *item,
204 const void *default_mask,
207 const struct rte_flow_item_ipv4 *spec = item->spec;
208 const struct rte_flow_item_ipv4 *mask = item->mask;
209 struct mlx4_flow *flow = (struct mlx4_flow *)data;
210 struct ibv_flow_spec_ipv4 *ipv4;
211 unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
213 ++flow->ibv_attr->num_of_specs;
/* L3 match is more precise than L2: priority 1. */
214 flow->ibv_attr->priority = 1;
215 ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
216 *ipv4 = (struct ibv_flow_spec_ipv4) {
217 .type = IBV_FLOW_SPEC_IPV4,
/* NOTE(review): NULL-spec guard and mask fallback lines missing here. */
222 ipv4->val = (struct ibv_flow_ipv4_filter) {
223 .src_ip = spec->hdr.src_addr,
224 .dst_ip = spec->hdr.dst_addr,
228 ipv4->mask = (struct ibv_flow_ipv4_filter) {
229 .src_ip = mask->hdr.src_addr,
230 .dst_ip = mask->hdr.dst_addr,
232 /* Remove unwanted bits from values. */
233 ipv4->val.src_ip &= ipv4->mask.src_ip;
234 ipv4->val.dst_ip &= ipv4->mask.dst_ip;
239 * Convert UDP item to Verbs specification.
242 * Item specification.
243 * @param default_mask[in]
244 * Default bit-masks to use when item->mask is not provided.
245 * @param data[in, out]
/* Writes an ibv_flow_spec_tcp_udp (type UDP) matching on src/dst ports. */
249 mlx4_flow_create_udp(const struct rte_flow_item *item,
250 const void *default_mask,
253 const struct rte_flow_item_udp *spec = item->spec;
254 const struct rte_flow_item_udp *mask = item->mask;
255 struct mlx4_flow *flow = (struct mlx4_flow *)data;
256 struct ibv_flow_spec_tcp_udp *udp;
257 unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
259 ++flow->ibv_attr->num_of_specs;
/* L4 match is the most precise: priority 0. */
260 flow->ibv_attr->priority = 0;
261 udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
262 *udp = (struct ibv_flow_spec_tcp_udp) {
263 .type = IBV_FLOW_SPEC_UDP,
/* NOTE(review): NULL-spec guard and mask fallback lines missing here. */
268 udp->val.dst_port = spec->hdr.dst_port;
269 udp->val.src_port = spec->hdr.src_port;
272 udp->mask.dst_port = mask->hdr.dst_port;
273 udp->mask.src_port = mask->hdr.src_port;
274 /* Remove unwanted bits from values. */
275 udp->val.src_port &= udp->mask.src_port;
276 udp->val.dst_port &= udp->mask.dst_port;
281 * Convert TCP item to Verbs specification.
284 * Item specification.
285 * @param default_mask[in]
286 * Default bit-masks to use when item->mask is not provided.
287 * @param data[in, out]
/* Mirror of mlx4_flow_create_udp with IBV_FLOW_SPEC_TCP. */
291 mlx4_flow_create_tcp(const struct rte_flow_item *item,
292 const void *default_mask,
295 const struct rte_flow_item_tcp *spec = item->spec;
296 const struct rte_flow_item_tcp *mask = item->mask;
297 struct mlx4_flow *flow = (struct mlx4_flow *)data;
298 struct ibv_flow_spec_tcp_udp *tcp;
299 unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
301 ++flow->ibv_attr->num_of_specs;
302 flow->ibv_attr->priority = 0;
303 tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
304 *tcp = (struct ibv_flow_spec_tcp_udp) {
305 .type = IBV_FLOW_SPEC_TCP,
/* NOTE(review): NULL-spec guard and mask fallback lines missing here. */
310 tcp->val.dst_port = spec->hdr.dst_port;
311 tcp->val.src_port = spec->hdr.src_port;
314 tcp->mask.dst_port = mask->hdr.dst_port;
315 tcp->mask.src_port = mask->hdr.src_port;
316 /* Remove unwanted bits from values. */
317 tcp->val.src_port &= tcp->mask.src_port;
318 tcp->val.dst_port &= tcp->mask.dst_port;
323 * Check support for a given item.
326 * Item specification.
328 * Bit-masks covering supported fields to compare with spec, last and mask in
331 * Bit-Mask size in bytes.
334 * 0 on success, negative value otherwise.
/* Generic per-item validation: rejects mask/last without spec, rejects
 * spec or last bytes outside the supported mask, and rejects spec/last
 * ranges (only exact matches supported, so spec&mask must equal
 * last&mask). */
337 mlx4_flow_item_validate(const struct rte_flow_item *item,
338 const uint8_t *mask, unsigned int size)
342 if (!item->spec && (item->mask || item->last))
344 if (item->spec && !item->mask) {
346 const uint8_t *spec = item->spec;
/* Every set bit in spec must be covered by the supported mask. */
348 for (i = 0; i < size; ++i)
349 if ((spec[i] | mask[i]) != mask[i])
352 if (item->last && !item->mask) {
354 const uint8_t *spec = item->last;
356 for (i = 0; i < size; ++i)
357 if ((spec[i] | mask[i]) != mask[i])
360 if (item->spec && item->last) {
/* NOTE(review): the local spec[]/last[] buffer declarations and the
 * item->mask override of "apply" appear to be missing from this
 * extract. */
363 const uint8_t *apply = mask;
368 for (i = 0; i < size; ++i) {
369 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
370 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
/* Ranges are unsupported: masked spec and last must be identical. */
372 ret = memcmp(spec, last, size);
/* Validate an ETH item: destination MAC must be fully masked (all 0xff)
 * when a mask is given, then defer to the generic validator.
 * NOTE(review): the "if (item->mask) {" scope line is missing from this
 * extract — the inner "mask" declaration shadows the parameter inside
 * that (dropped) block. */
378 mlx4_flow_validate_eth(const struct rte_flow_item *item,
379 const uint8_t *mask, unsigned int size)
382 const struct rte_flow_item_eth *mask = item->mask;
384 if (mask->dst.addr_bytes[0] != 0xff ||
385 mask->dst.addr_bytes[1] != 0xff ||
386 mask->dst.addr_bytes[2] != 0xff ||
387 mask->dst.addr_bytes[3] != 0xff ||
388 mask->dst.addr_bytes[4] != 0xff ||
389 mask->dst.addr_bytes[5] != 0xff)
392 return mlx4_flow_item_validate(item, mask, size);
/* Validate a VLAN item: TCI mask must be empty or cover the full 12-bit
 * VLAN ID (0x0fff); PCP/DEI matching is not supported.
 * NOTE(review): enclosing "if (item->mask) {" scope line missing from
 * this extract. */
396 mlx4_flow_validate_vlan(const struct rte_flow_item *item,
397 const uint8_t *mask, unsigned int size)
400 const struct rte_flow_item_vlan *mask = item->mask;
402 if (mask->tci != 0 &&
403 ntohs(mask->tci) != 0x0fff)
406 return mlx4_flow_item_validate(item, mask, size);
/* Validate an IPv4 item: src/dst address masks must be empty or full
 * (exact-match only, no prefixes).
 * NOTE(review): enclosing "if (item->mask) {" scope line missing from
 * this extract. */
410 mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
411 const uint8_t *mask, unsigned int size)
414 const struct rte_flow_item_ipv4 *mask = item->mask;
416 if (mask->hdr.src_addr != 0 &&
417 mask->hdr.src_addr != 0xffffffff)
419 if (mask->hdr.dst_addr != 0 &&
420 mask->hdr.dst_addr != 0xffffffff)
423 return mlx4_flow_item_validate(item, mask, size);
/* Validate a UDP item: port masks must be empty or full 16 bits
 * (exact-match only).
 * NOTE(review): enclosing "if (item->mask) {" scope line missing from
 * this extract. */
427 mlx4_flow_validate_udp(const struct rte_flow_item *item,
428 const uint8_t *mask, unsigned int size)
431 const struct rte_flow_item_udp *mask = item->mask;
433 if (mask->hdr.src_port != 0 &&
434 mask->hdr.src_port != 0xffff)
436 if (mask->hdr.dst_port != 0 &&
437 mask->hdr.dst_port != 0xffff)
440 return mlx4_flow_item_validate(item, mask, size);
/* Validate a TCP item: identical port-mask policy as UDP.
 * NOTE(review): enclosing "if (item->mask) {" scope line missing from
 * this extract. */
444 mlx4_flow_validate_tcp(const struct rte_flow_item *item,
445 const uint8_t *mask, unsigned int size)
448 const struct rte_flow_item_tcp *mask = item->mask;
450 if (mask->hdr.src_port != 0 &&
451 mask->hdr.src_port != 0xffff)
453 if (mask->hdr.dst_port != 0 &&
454 mask->hdr.dst_port != 0xffff)
457 return mlx4_flow_item_validate(item, mask, size);
460 /** Graph of supported items and associated actions. */
/*
 * Pattern graph: END -> ETH -> [VLAN] -> IPV4 -> {UDP | TCP}.
 * Each node carries its supported mask, default mask, validator,
 * converter and the Verbs destination spec size used to size the
 * ibv_flow_attr buffer.
 */
461 static const struct mlx4_flow_items mlx4_flow_items[] = {
462 [RTE_FLOW_ITEM_TYPE_END] = {
463 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
465 [RTE_FLOW_ITEM_TYPE_ETH] = {
466 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
467 RTE_FLOW_ITEM_TYPE_IPV4),
468 .actions = valid_actions,
469 .mask = &(const struct rte_flow_item_eth){
470 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
471 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
473 .default_mask = &rte_flow_item_eth_mask,
474 .mask_sz = sizeof(struct rte_flow_item_eth),
475 .validate = mlx4_flow_validate_eth,
476 .convert = mlx4_flow_create_eth,
477 .dst_sz = sizeof(struct ibv_flow_spec_eth),
479 [RTE_FLOW_ITEM_TYPE_VLAN] = {
480 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
481 .actions = valid_actions,
482 .mask = &(const struct rte_flow_item_vlan){
483 /* rte_flow_item_vlan_mask is invalid for mlx4. */
484 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* NOTE(review): the byte-order-specific .tci initializers, the #else/
 * #endif lines and VLAN's .dst_sz (0 — VLAN shares the ETH spec) are
 * missing from this extract. */
490 .mask_sz = sizeof(struct rte_flow_item_vlan),
491 .validate = mlx4_flow_validate_vlan,
492 .convert = mlx4_flow_create_vlan,
495 [RTE_FLOW_ITEM_TYPE_IPV4] = {
496 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
497 RTE_FLOW_ITEM_TYPE_TCP),
498 .actions = valid_actions,
499 .mask = &(const struct rte_flow_item_ipv4){
/* NOTE(review): the .hdr.src_addr/.hdr.dst_addr mask initializers are
 * missing from this extract. */
505 .default_mask = &rte_flow_item_ipv4_mask,
506 .mask_sz = sizeof(struct rte_flow_item_ipv4),
507 .validate = mlx4_flow_validate_ipv4,
508 .convert = mlx4_flow_create_ipv4,
509 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
511 [RTE_FLOW_ITEM_TYPE_UDP] = {
512 .actions = valid_actions,
513 .mask = &(const struct rte_flow_item_udp){
/* NOTE(review): the .hdr.src_port/.hdr.dst_port mask initializers are
 * missing from this extract. */
519 .default_mask = &rte_flow_item_udp_mask,
520 .mask_sz = sizeof(struct rte_flow_item_udp),
521 .validate = mlx4_flow_validate_udp,
522 .convert = mlx4_flow_create_udp,
523 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
525 [RTE_FLOW_ITEM_TYPE_TCP] = {
526 .actions = valid_actions,
527 .mask = &(const struct rte_flow_item_tcp){
/* NOTE(review): the .hdr.src_port/.hdr.dst_port mask initializers are
 * missing from this extract. */
533 .default_mask = &rte_flow_item_tcp_mask,
534 .mask_sz = sizeof(struct rte_flow_item_tcp),
535 .validate = mlx4_flow_validate_tcp,
536 .convert = mlx4_flow_create_tcp,
537 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
542 * Validate a flow supported by the NIC.
545 * Pointer to private structure.
547 * Flow rule attributes.
549 * Pattern specification (list terminated by the END pattern item).
551 * Associated actions (list terminated by the END action).
553 * Perform verbose error reporting if not NULL.
554 * @param[in, out] flow
555 * Flow structure to update.
558 * 0 on success, a negative errno value otherwise and rte_errno is set.
/* Walks attributes, the pattern item list (through the mlx4_flow_items
 * graph) and the action list. When flow->ibv_attr is non-NULL this also
 * performs the rte_flow -> Verbs conversion, advancing flow->offset by
 * each item's dst_sz; otherwise it only validates. */
561 priv_flow_validate(struct priv *priv,
562 const struct rte_flow_attr *attr,
563 const struct rte_flow_item items[],
564 const struct rte_flow_action actions[],
565 struct rte_flow_error *error,
566 struct mlx4_flow *flow)
/* Traversal starts at the END node, whose .items lists valid first items. */
568 const struct mlx4_flow_items *cur_item = mlx4_flow_items;
569 struct mlx4_flow_action action = {
/* Attribute checks: only ingress, group 0, priority 0 are supported. */
576 rte_flow_error_set(error, ENOTSUP,
577 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
579 "groups are not supported");
582 if (attr->priority) {
583 rte_flow_error_set(error, ENOTSUP,
584 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
586 "priorities are not supported");
590 rte_flow_error_set(error, ENOTSUP,
591 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
593 "egress is not supported");
596 if (!attr->ingress) {
597 rte_flow_error_set(error, ENOTSUP,
598 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
600 "only ingress is supported");
603 /* Go over items list. */
604 for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
605 const struct mlx4_flow_items *token = NULL;
/* VOID items are transparent and simply skipped. */
609 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
612 * The nic can support patterns with NULL eth spec only
613 * if eth is a single item in a rule.
616 items->type == RTE_FLOW_ITEM_TYPE_ETH) {
617 const struct rte_flow_item *next = items + 1;
619 if (next->type != RTE_FLOW_ITEM_TYPE_END) {
620 rte_flow_error_set(error, ENOTSUP,
621 RTE_FLOW_ERROR_TYPE_ITEM,
624 " an Ethernet spec");
/* Look up items->type among the current node's allowed successors. */
630 cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
632 if (cur_item->items[i] == items->type) {
633 token = &mlx4_flow_items[items->type];
638 goto exit_item_not_supported;
640 err = cur_item->validate(items,
641 (const uint8_t *)cur_item->mask,
644 goto exit_item_not_supported;
/* Conversion pass: only when a destination buffer was allocated. */
645 if (flow->ibv_attr && cur_item->convert) {
646 err = cur_item->convert(items,
647 (cur_item->default_mask ?
648 cur_item->default_mask :
652 goto exit_item_not_supported;
654 flow->offset += cur_item->dst_sz;
656 /* Go over actions list */
657 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
658 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
660 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
662 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
663 const struct rte_flow_action_queue *queue =
664 (const struct rte_flow_action_queue *)
/* Queue index must reference an existing Rx queue. */
667 if (!queue || (queue->index > (priv->rxqs_n - 1)))
668 goto exit_action_not_supported;
671 goto exit_action_not_supported;
/* A rule needs at least one terminal action (queue or drop). */
674 if (!action.queue && !action.drop) {
675 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
676 NULL, "no valid action");
680 exit_item_not_supported:
681 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
682 items, "item not supported");
684 exit_action_not_supported:
685 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
686 actions, "action not supported");
691 * Validate a flow supported by the NIC.
693 * @see rte_flow_validate()
/* rte_flow_ops entry point: runs the validation pass only (flow.ibv_attr
 * left NULL, so no conversion happens). */
697 mlx4_flow_validate(struct rte_eth_dev *dev,
698 const struct rte_flow_attr *attr,
699 const struct rte_flow_item items[],
700 const struct rte_flow_action actions[],
701 struct rte_flow_error *error)
703 struct priv *priv = dev->data->dev_private;
/* Offset starts past the ibv_flow_attr header the specs will follow. */
705 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
708 ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
714 * Complete flow rule creation.
717 * Pointer to private structure.
719 * Verbs flow attributes.
721 * Target action structure.
723 * Perform verbose error reporting if not NULL.
726 * A flow if the rule could be created.
/* Allocates the rte_flow handle, binds it to the target Rx queue's QP
 * (or, for drop, to a dedicated CQ/QP pair that never receives) and
 * attaches the Verbs flow rule. On error every acquired resource is
 * released via the error labels at the bottom. */
728 static struct rte_flow *
729 priv_flow_create_action_queue(struct priv *priv,
730 struct ibv_flow_attr *ibv_attr,
731 struct mlx4_flow_action *action,
732 struct rte_flow_error *error)
736 struct rte_flow *rte_flow;
740 rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
742 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
743 NULL, "cannot allocate flow memory");
746 rxq = (*priv->rxqs)[action->queue_id];
/* Drop path: minimal 1-entry CQ backing a RAW_PACKET QP with no Rx queue. */
749 ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
750 &(struct ibv_exp_cq_init_attr){
754 rte_flow_error_set(error, ENOMEM,
755 RTE_FLOW_ERROR_TYPE_HANDLE,
756 NULL, "cannot allocate CQ");
759 rte_flow->qp = ibv_exp_create_qp(
761 &(struct ibv_exp_qp_init_attr){
762 .send_cq = rte_flow->cq,
763 .recv_cq = rte_flow->cq,
768 .qp_type = IBV_QPT_RAW_PACKET,
770 IBV_EXP_QP_INIT_ATTR_PD |
771 IBV_EXP_QP_INIT_ATTR_PORT |
772 IBV_EXP_QP_INIT_ATTR_RES_DOMAIN,
774 .res_domain = rxq->rd,
775 .port_num = priv->port,
778 rte_flow_error_set(error, ENOMEM,
779 RTE_FLOW_ERROR_TYPE_HANDLE,
780 NULL, "cannot allocate QP");
/* NOTE(review): the queue-action branch selecting rxq->qp and the
 * "priv->started" guard appear to be missing from this extract. */
788 rte_flow->ibv_attr = ibv_attr;
789 rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
790 if (!rte_flow->ibv_flow) {
791 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
792 NULL, "flow rule creation failure");
/* Error unwind: destroy CQ/QP only if they were created above. */
800 ibv_destroy_cq(rte_flow->cq);
802 ibv_destroy_qp(rte_flow->qp);
803 rte_free(rte_flow->ibv_attr);
812 * Pointer to private structure.
814 * Flow rule attributes.
816 * Pattern specification (list terminated by the END pattern item).
818 * Associated actions (list terminated by the END action).
820 * Perform verbose error reporting if not NULL.
823 * A flow on success, NULL otherwise.
/* Two-pass creation: first pass validates and computes the total Verbs
 * buffer size in flow.offset, second pass (after allocating ibv_attr)
 * re-runs validation to fill in the specs, then hands off to
 * priv_flow_create_action_queue(). */
825 static struct rte_flow *
826 priv_flow_create(struct priv *priv,
827 const struct rte_flow_attr *attr,
828 const struct rte_flow_item items[],
829 const struct rte_flow_action actions[],
830 struct rte_flow_error *error)
832 struct rte_flow *rte_flow;
833 struct mlx4_flow_action action;
834 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
837 err = priv_flow_validate(priv, attr, items, actions, error, &flow);
/* flow.offset now holds the full attr + specs size. */
840 flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
841 if (!flow.ibv_attr) {
842 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
843 NULL, "cannot allocate ibv_attr memory");
/* Reset offset so the conversion pass writes right after the header. */
846 flow.offset = sizeof(struct ibv_flow_attr);
847 *flow.ibv_attr = (struct ibv_flow_attr){
849 .type = IBV_FLOW_ATTR_NORMAL,
850 .size = sizeof(struct ibv_flow_attr),
851 .priority = attr->priority,
/* Second pass cannot fail: the same arguments already validated above. */
856 claim_zero(priv_flow_validate(priv, attr, items, actions,
858 action = (struct mlx4_flow_action){
862 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
863 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
865 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
868 ((const struct rte_flow_action_queue *)
869 actions->conf)->index;
870 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
873 rte_flow_error_set(error, ENOTSUP,
874 RTE_FLOW_ERROR_TYPE_ACTION,
875 actions, "unsupported action");
879 rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
/* Error path: conversion buffer is owned here until handoff succeeds. */
883 rte_free(flow.ibv_attr);
890 * @see rte_flow_create()
/* rte_flow_ops entry point: creates the rule and, on success, links the
 * new handle into the per-port flow list. */
894 mlx4_flow_create(struct rte_eth_dev *dev,
895 const struct rte_flow_attr *attr,
896 const struct rte_flow_item items[],
897 const struct rte_flow_action actions[],
898 struct rte_flow_error *error)
900 struct priv *priv = dev->data->dev_private;
901 struct rte_flow *flow;
904 flow = priv_flow_create(priv, attr, items, actions, error);
906 LIST_INSERT_HEAD(&priv->flows, flow, next);
907 DEBUG("Flow created %p", (void *)flow);
917 * Pointer to private structure.
/* Unlinks the flow from the per-port list and releases all Verbs
 * resources it owns (flow rule, then QP, then CQ, then the attr buffer).
 * The guards on qp/cq (visible as conditional destroys below) cover
 * queue-action flows that borrowed the Rx queue's QP. */
922 priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
925 LIST_REMOVE(flow, next);
927 claim_zero(ibv_destroy_flow(flow->ibv_flow));
929 claim_zero(ibv_destroy_qp(flow->qp));
931 claim_zero(ibv_destroy_cq(flow->cq));
932 rte_free(flow->ibv_attr);
933 DEBUG("Flow destroyed %p", (void *)flow);
940 * @see rte_flow_destroy()
/* rte_flow_ops entry point: thin wrapper around priv_flow_destroy(). */
944 mlx4_flow_destroy(struct rte_eth_dev *dev,
945 struct rte_flow *flow,
946 struct rte_flow_error *error)
948 struct priv *priv = dev->data->dev_private;
952 priv_flow_destroy(priv, flow);
961 * Pointer to private structure.
/* Destroys every flow on the per-port list; each priv_flow_destroy()
 * call unlinks the head, so the loop terminates when the list empties. */
964 priv_flow_flush(struct priv *priv)
966 while (!LIST_EMPTY(&priv->flows)) {
967 struct rte_flow *flow;
969 flow = LIST_FIRST(&priv->flows);
970 priv_flow_destroy(priv, flow);
977 * @see rte_flow_flush()
/* rte_flow_ops entry point: thin wrapper around priv_flow_flush(). */
981 mlx4_flow_flush(struct rte_eth_dev *dev,
982 struct rte_flow_error *error)
984 struct priv *priv = dev->data->dev_private;
988 priv_flow_flush(priv)<span></span>;
996 * Called by dev_stop() to remove all flows.
999 * Pointer to private structure.
/* Detaches every rule from hardware but keeps the rte_flow handles and
 * their ibv_attr buffers, so mlx4_priv_flow_start() can re-apply them. */
1002 mlx4_priv_flow_stop(struct priv *priv)
1004 struct rte_flow *flow;
1006 for (flow = LIST_FIRST(&priv->flows);
1008 flow = LIST_NEXT(flow, next)) {
1009 claim_zero(ibv_destroy_flow(flow->ibv_flow));
/* NULL marks the rule as detached for the restart path. */
1010 flow->ibv_flow = NULL;
1011 DEBUG("Flow %p removed", (void *)flow);
1019 * Pointer to private structure.
1022 * 0 on success, a errno value otherwise and rte_errno is set.
1025 mlx4_priv_flow_start(struct priv *priv)
1028 struct rte_flow *flow;
1030 for (flow = LIST_FIRST(&priv->flows);
1032 flow = LIST_NEXT(flow, next)) {
1033 qp = flow->qp ? flow->qp : flow->rxq->qp;
1034 flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
1035 if (!flow->ibv_flow) {
1036 DEBUG("Flow %p cannot be applied", (void *)flow);
1040 DEBUG("Flow %p applied", (void *)flow);