/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <string.h>
#include <arpa/inet.h>

#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"

/** Static initializer for items. */
#define ITEMS(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
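
/*
 * For instance, ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to a compound literal equivalent to:
 *
 *	(const enum rte_flow_item_type []){
 *		RTE_FLOW_ITEM_TYPE_VLAN,
 *		RTE_FLOW_ITEM_TYPE_IPV4,
 *		RTE_FLOW_ITEM_TYPE_END,
 *	}
 *
 * i.e. an anonymous, END-terminated array of allowed next-layer items.
 */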

/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx4_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/**
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Check support for a given item.
	 *
	 * @param item[in]
	 *   Item specification.
	 * @param mask[in]
	 *   Bit-masks covering supported fields to compare with spec,
	 *   last and mask in \item.
	 * @param size
	 *   Bit-Mask size in bytes.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*validate)(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size);
	/**
	 * Conversion function from rte_flow to NIC-specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
};
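
/*
 * Note: priv_flow_create() below walks this graph twice through
 * priv_flow_validate(): a first pass with flow->ibv_attr == NULL only
 * validates items and accumulates each dst_sz into flow->offset to size
 * the Verbs attribute buffer, then a second pass invokes the convert()
 * callbacks to fill it.
 */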

struct rte_flow_drop {
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
};

/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};

/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
	unsigned int i;

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 2;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = eth_size,
	};
	if (!spec) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (!mask)
		mask = default_mask;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
	}
	return 0;
}
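
/*
 * Note: each convert callback overwrites ibv_attr->priority (2 for
 * Ethernet here, 1 for IPv4, 0 for TCP/UDP below), so the deepest layer
 * present in the pattern determines the final value; more specific
 * rules end up with a lower number, which Verbs flow steering treats as
 * higher matching precedence.
 */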

/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
}
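
/*
 * Note: the VLAN item does not emit a Verbs spec of its own (its dst_sz
 * in mlx4_flow_items[] is 0); it patches vlan_tag into the Ethernet
 * spec written just before it, hence the "- eth_size" offset above.
 */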

/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_ipv4 *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 1;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = ipv4_size,
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	if (!mask)
		mask = default_mask;
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
}

/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = udp_size,
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	if (!mask)
		mask = default_mask;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
}

/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = tcp_size,
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	if (!mask)
		mask = default_mask;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
}

/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-Mask size in bytes.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_item_validate(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	int ret = 0;

	if (!item->spec && (item->mask || item->last))
		return -1;
	if (item->spec && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->spec;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->last && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->last;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		const uint8_t *apply = mask;
		unsigned int i;

		if (item->mask)
			apply = item->mask;
		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
		}
		ret = memcmp(spec, last, size);
	}
	return ret;
}
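
/*
 * Example: for a 16-bit field with mask 0xff00, spec 0x1200 and last
 * 0x12ff compare equal after masking (both yield 0x1200), so the
 * spec/last range collapses to a single match and is accepted; spec
 * 0x1200 with last 0x1300 would differ after masking and be rejected.
 */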

static int
mlx4_flow_validate_eth(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_eth *mask = item->mask;

		if (mask->dst.addr_bytes[0] != 0xff ||
				mask->dst.addr_bytes[1] != 0xff ||
				mask->dst.addr_bytes[2] != 0xff ||
				mask->dst.addr_bytes[3] != 0xff ||
				mask->dst.addr_bytes[4] != 0xff ||
				mask->dst.addr_bytes[5] != 0xff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_vlan(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_vlan *mask = item->mask;

		if (mask->tci != 0 &&
		    ntohs(mask->tci) != 0x0fff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_ipv4 *mask = item->mask;

		if (mask->hdr.src_addr != 0 &&
		    mask->hdr.src_addr != 0xffffffff)
			return -1;
		if (mask->hdr.dst_addr != 0 &&
		    mask->hdr.dst_addr != 0xffffffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_udp(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_udp *mask = item->mask;

		if (mask->hdr.src_port != 0 &&
		    mask->hdr.src_port != 0xffff)
			return -1;
		if (mask->hdr.dst_port != 0 &&
		    mask->hdr.dst_port != 0xffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_tcp(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_tcp *mask = item->mask;

		if (mask->hdr.src_port != 0 &&
		    mask->hdr.src_port != 0xffff)
			return -1;
		if (mask->hdr.dst_port != 0 &&
		    mask->hdr.dst_port != 0xffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_items mlx4_flow_items[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_IPV4),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.default_mask = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.validate = mlx4_flow_validate_eth,
		.convert = mlx4_flow_create_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vlan){
			/* rte_flow_item_vlan_mask is invalid for mlx4. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			.tci = 0x0fff,
#else
			.tci = 0xff0f,
#endif
		},
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.validate = mlx4_flow_validate_vlan,
		.convert = mlx4_flow_create_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = -1,
				.dst_addr = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.validate = mlx4_flow_validate_ipv4,
		.convert = mlx4_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.validate = mlx4_flow_validate_udp,
		.convert = mlx4_flow_create_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.validate = mlx4_flow_validate_tcp,
		.convert = mlx4_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};
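
/*
 * According to this graph, ETH -> IPV4 -> UDP -> END is a valid item
 * chain: traversal starts from the END entry (index 0), and each item
 * must appear in its predecessor's .items list. A chain such as
 * IPV4 -> ETH is rejected.
 */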

/**
 * Validate a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_validate(struct priv *priv,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error,
		   struct mlx4_flow *flow)
{
	const struct mlx4_flow_items *cur_item = mlx4_flow_items;
	struct mlx4_flow_action action = {
		.queue = 0,
		.drop = 0,
	};

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
	/* Go over items list. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		const struct mlx4_flow_items *token = NULL;
		unsigned int i;
		int err;

		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		/*
		 * The NIC can support patterns with a NULL Ethernet spec
		 * only if Ethernet is the only item in the rule.
		 */
		if (!items->spec &&
		    items->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item *next = items + 1;

			if (next->type != RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   items,
						   "the rule requires"
						   " an Ethernet spec");
				return -rte_errno;
			}
		}
		for (i = 0;
		     cur_item->items &&
		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
		     ++i) {
			if (cur_item->items[i] == items->type) {
				token = &mlx4_flow_items[items->type];
				break;
			}
		}
		if (!token)
			goto exit_item_not_supported;
		cur_item = token;
		err = cur_item->validate(items,
					 (const uint8_t *)cur_item->mask,
					 cur_item->mask_sz);
		if (err)
			goto exit_item_not_supported;
		if (flow->ibv_attr && cur_item->convert) {
			err = cur_item->convert(items,
						(cur_item->default_mask ?
						 cur_item->default_mask :
						 cur_item->mask),
						flow);
			if (err)
				goto exit_item_not_supported;
		}
		flow->offset += cur_item->dst_sz;
	}
	/* Go over actions list. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			action.drop = 1;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (!queue || (queue->index > (priv->rxqs_n - 1)))
				goto exit_action_not_supported;
			action.queue = 1;
		} else {
			goto exit_action_not_supported;
		}
	}
	if (!action.queue && !action.drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	return 0;
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   items, "item not supported");
	return -rte_errno;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
	return -rte_errno;
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int ret;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };

	priv_lock(priv);
	ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
	priv_unlock(priv);
	return ret;
}
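
/*
 * A minimal application-side sketch reaching the entry point above
 * through rte_flow_validate() (port_id and the pattern are
 * illustrative):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */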

/**
 * Destroy a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_flow_destroy_drop_queue(struct priv *priv)
{
	if (priv->flow_drop_queue) {
		struct rte_flow_drop *fdq = priv->flow_drop_queue;

		priv->flow_drop_queue = NULL;
		claim_zero(ibv_destroy_qp(fdq->qp));
		claim_zero(ibv_destroy_cq(fdq->cq));
		rte_free(fdq);
	}
}

/**
 * Create a single drop queue for all drop flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_create_drop_queue(struct priv *priv)
{
	struct ibv_qp *qp;
	struct ibv_cq *cq;
	struct rte_flow_drop *fdq;

	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
	if (!fdq) {
		ERROR("Cannot allocate memory for drop struct");
		goto err;
	}
	cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!cq) {
		ERROR("Cannot create drop CQ");
		goto err_create_cq;
	}
	qp = ibv_create_qp(priv->pd,
			   &(struct ibv_qp_init_attr){
				.send_cq = cq,
				.recv_cq = cq,
				.cap = {
					.max_recv_wr = 1,
					.max_recv_sge = 1,
				},
				.qp_type = IBV_QPT_RAW_PACKET,
			   });
	if (!qp) {
		ERROR("Cannot create drop QP");
		goto err_create_qp;
	}
	*fdq = (struct rte_flow_drop){
		.qp = qp,
		.cq = cq,
	};
	priv->flow_drop_queue = fdq;
	return 0;
err_create_qp:
	claim_zero(ibv_destroy_cq(cq));
err_create_cq:
	rte_free(fdq);
err:
	return -1;
}
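
/*
 * Design note (implementation detail, not mandated by Verbs): all drop
 * flows attach to this single dummy QP. It is never moved to a
 * receive-ready state and no receive work requests are posted to it,
 * so packets steered to it are simply discarded.
 */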

/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param action
 *   Target action structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
			      struct ibv_flow_attr *ibv_attr,
			      struct mlx4_flow_action *action,
			      struct rte_flow_error *error)
{
	struct ibv_qp *qp;
	struct rte_flow *rte_flow;

	assert(priv->pd);
	assert(priv->ctx);
	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	if (action->drop) {
		qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
	} else {
		struct rxq *rxq = (*priv->rxqs)[action->queue_id];

		qp = rxq->qp;
		rte_flow->qp = qp;
	}
	rte_flow->ibv_attr = ibv_attr;
	if (!priv->started)
		return rte_flow;
	rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	rte_free(rte_flow);
	return NULL;
}

/**
 * Convert a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise.
 */
static struct rte_flow *
priv_flow_create(struct priv *priv,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct rte_flow *rte_flow;
	struct mlx4_flow_action action;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
	int err;

	err = priv_flow_validate(priv, attr, items, actions, error, &flow);
	if (err)
		return NULL;
	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
	if (!flow.ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
		return NULL;
	}
	flow.offset = sizeof(struct ibv_flow_attr);
	*flow.ibv_attr = (struct ibv_flow_attr){
		.comp_mask = 0,
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_flow_attr),
		.priority = attr->priority,
		.num_of_specs = 0,
		.port = priv->port,
		.flags = 0,
	};
	claim_zero(priv_flow_validate(priv, attr, items, actions,
				      error, &flow));
	action = (struct mlx4_flow_action){
		.queue = 0,
		.drop = 0,
	};
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			action.queue = 1;
			action.queue_id =
				((const struct rte_flow_action_queue *)
				 actions->conf)->index;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			action.drop = 1;
		} else {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions, "unsupported action");
			goto exit;
		}
	}
	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
						 &action, error);
	if (rte_flow)
		return rte_flow;
exit:
	rte_free(flow.ibv_attr);
	return NULL;
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;

	priv_lock(priv);
	flow = priv_flow_create(priv, attr, items, actions, error);
	if (flow) {
		LIST_INSERT_HEAD(&priv->flows, flow, next);
		DEBUG("Flow created %p", (void *)flow);
	}
	priv_unlock(priv);
	return flow;
}

/**
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 *
 * Must be done before calling dev_configure().
 *
 * @param dev
 *   Pointer to the ethernet device structure.
 * @param enable
 *   Nonzero to enter isolated mode, attempt to leave it otherwise.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value on error.
 */
int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	if (priv->rxqs) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "isolated mode must be set"
				   " before configuring the device");
		priv_unlock(priv);
		return -rte_errno;
	}
	priv->isolated = !!enable;
	priv_unlock(priv);
	return 0;
}

/**
 * Destroy a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
{
	(void)priv;
	LIST_REMOVE(flow, next);
	if (flow->ibv_flow)
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
	rte_free(flow->ibv_attr);
	DEBUG("Flow destroyed %p", (void *)flow);
	rte_free(flow);
}

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_destroy(priv, flow);
	priv_unlock(priv);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_flow_flush(struct priv *priv)
{
	while (!LIST_EMPTY(&priv->flows)) {
		struct rte_flow *flow;

		flow = LIST_FIRST(&priv->flows);
		priv_flow_destroy(priv, flow);
	}
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_flush(priv);
	priv_unlock(priv);
	return 0;
}

/**
 * Remove all flows.
 *
 * Called by dev_stop() to remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_priv_flow_stop(struct priv *priv)
{
	struct rte_flow *flow;

	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		DEBUG("Flow %p removed", (void *)flow);
	}
	mlx4_flow_destroy_drop_queue(priv);
}
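
/*
 * Note: only the Verbs handles are destroyed above; the rte_flow
 * objects stay on priv->flows so that mlx4_priv_flow_start() below can
 * re-apply them once the port is restarted.
 */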

/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
mlx4_priv_flow_start(struct priv *priv)
{
	int ret;
	struct ibv_qp *qp;
	struct rte_flow *flow;

	ret = mlx4_flow_create_drop_queue(priv);
	if (ret)
		return -1;
	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
		flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
		if (!flow->ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
		DEBUG("Flow %p applied", (void *)flow);
	}
	return 0;
}