4 * Copyright 2017 6WIND S.A.
5 * Copyright 2017 Mellanox
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <rte_flow_driver.h>
38 #include <rte_malloc.h>
42 #include "mlx4_flow.h"
43 #include "mlx4_utils.h"
/** Static initializer for items. */
/*
 * NOTE(review): the "#define ITEMS(...)" directive line itself appears to
 * have been lost from this excerpt; the two lines below are the macro's
 * replacement list (a compound literal terminated by the END item type).
 * Restore from the complete source before compiling.
 */
(const enum rte_flow_item_type []){ \
	__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
/** Structure to generate a simple graph of layers supported by the NIC. */
/*
 * NOTE(review): several member declarations appear to be missing from this
 * excerpt (e.g. the "mask" member referenced by the mlx4_flow_items[] table
 * below, the third parameter of convert(), and the closing brace).
 */
struct mlx4_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	/*
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/*
	 * Check support for a given item.
	 *
	 * @param mask
	 *   Bit-masks covering supported fields to compare with spec,
	 *   last and mask in \item.
	 * @param size
	 *   Bit-Mask size in bytes.
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*validate)(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size);
	/*
	 * Conversion function from rte_flow to NIC specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
/** Verbs resources backing the single drop queue shared by all DROP rules. */
struct rte_flow_drop {
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
/** Valid action for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure (struct mlx4_flow *) holding the Verbs attribute buffer.
 */
/*
 * NOTE(review): the return-type line, opening brace and some conditionals
 * appear to be missing from this excerpt; in particular the
 * IBV_FLOW_ATTR_ALL_DEFAULT assignment below is presumably guarded by a
 * "!spec" check — confirm against the complete source.
 */
mlx4_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	/* Account for this spec in the Verbs attribute header. */
	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 2;
	/* Append the spec at the current write offset inside the buffer. */
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
	flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure (struct mlx4_flow *).
 */
mlx4_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	/*
	 * VLAN has no spec of its own: the TCI is folded into the Ethernet
	 * spec written just before it (hence the "- eth_size" offset).
	 */
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	/* Remove unwanted bits from the value. */
	eth->val.vlan_tag &= eth->mask.vlan_tag;
/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure (struct mlx4_flow *).
 */
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_ipv4 *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);

	/* Account for this spec and append it at the current offset. */
	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 1;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure (struct mlx4_flow *).
 */
mlx4_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	/* Account for this spec and append it at the current offset. */
	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure (struct mlx4_flow *).
 */
mlx4_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	/* Account for this spec and append it at the current offset. */
	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
326 * Check support for a given item.
329 * Item specification.
331 * Bit-masks covering supported fields to compare with spec, last and mask in
334 * Bit-Mask size in bytes.
337 * 0 on success, negative value otherwise.
340 mlx4_flow_item_validate(const struct rte_flow_item *item,
341 const uint8_t *mask, unsigned int size)
345 if (!item->spec && (item->mask || item->last))
347 if (item->spec && !item->mask) {
349 const uint8_t *spec = item->spec;
351 for (i = 0; i < size; ++i)
352 if ((spec[i] | mask[i]) != mask[i])
355 if (item->last && !item->mask) {
357 const uint8_t *spec = item->last;
359 for (i = 0; i < size; ++i)
360 if ((spec[i] | mask[i]) != mask[i])
363 if (item->spec && item->last) {
366 const uint8_t *apply = mask;
371 for (i = 0; i < size; ++i) {
372 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
373 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
375 ret = memcmp(spec, last, size);
381 mlx4_flow_validate_eth(const struct rte_flow_item *item,
382 const uint8_t *mask, unsigned int size)
385 const struct rte_flow_item_eth *mask = item->mask;
387 if (mask->dst.addr_bytes[0] != 0xff ||
388 mask->dst.addr_bytes[1] != 0xff ||
389 mask->dst.addr_bytes[2] != 0xff ||
390 mask->dst.addr_bytes[3] != 0xff ||
391 mask->dst.addr_bytes[4] != 0xff ||
392 mask->dst.addr_bytes[5] != 0xff)
395 return mlx4_flow_item_validate(item, mask, size);
399 mlx4_flow_validate_vlan(const struct rte_flow_item *item,
400 const uint8_t *mask, unsigned int size)
403 const struct rte_flow_item_vlan *mask = item->mask;
405 if (mask->tci != 0 &&
406 ntohs(mask->tci) != 0x0fff)
409 return mlx4_flow_item_validate(item, mask, size);
413 mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
414 const uint8_t *mask, unsigned int size)
417 const struct rte_flow_item_ipv4 *mask = item->mask;
419 if (mask->hdr.src_addr != 0 &&
420 mask->hdr.src_addr != 0xffffffff)
422 if (mask->hdr.dst_addr != 0 &&
423 mask->hdr.dst_addr != 0xffffffff)
426 return mlx4_flow_item_validate(item, mask, size);
430 mlx4_flow_validate_udp(const struct rte_flow_item *item,
431 const uint8_t *mask, unsigned int size)
434 const struct rte_flow_item_udp *mask = item->mask;
436 if (mask->hdr.src_port != 0 &&
437 mask->hdr.src_port != 0xffff)
439 if (mask->hdr.dst_port != 0 &&
440 mask->hdr.dst_port != 0xffff)
443 return mlx4_flow_item_validate(item, mask, size);
447 mlx4_flow_validate_tcp(const struct rte_flow_item *item,
448 const uint8_t *mask, unsigned int size)
451 const struct rte_flow_item_tcp *mask = item->mask;
453 if (mask->hdr.src_port != 0 &&
454 mask->hdr.src_port != 0xffff)
456 if (mask->hdr.dst_port != 0 &&
457 mask->hdr.dst_port != 0xffff)
460 return mlx4_flow_item_validate(item, mask, size);
/** Graph of supported items and associated actions. */
/*
 * NOTE(review): several initializer lines are missing from this excerpt
 * (e.g. the #endif matching the #if below, the VLAN/IPv4/UDP/TCP mask field
 * initializers and some closing braces). Restore from the complete source.
 */
static const struct mlx4_flow_items mlx4_flow_items[] = {
	/* END doubles as the list of items a pattern may start with. */
	[RTE_FLOW_ITEM_TYPE_END] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
			       RTE_FLOW_ITEM_TYPE_IPV4),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.default_mask = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.validate = mlx4_flow_validate_eth,
		.convert = mlx4_flow_create_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_vlan){
			/* rte_flow_item_vlan_mask is invalid for mlx4. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.validate = mlx4_flow_validate_vlan,
		.convert = mlx4_flow_create_vlan,
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv4){
		.default_mask = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.validate = mlx4_flow_validate_ipv4,
		.convert = mlx4_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_udp){
		.default_mask = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.validate = mlx4_flow_validate_udp,
		.convert = mlx4_flow_create_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_tcp){
		.default_mask = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.validate = mlx4_flow_validate_tcp,
		.convert = mlx4_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
/**
 * Validate a flow supported by the NIC.
 *
 * Walks the pattern through the mlx4_flow_items graph, validating each item
 * against its supported mask; when flow->ibv_attr is set, it also converts
 * items into Verbs specs. Otherwise only flow->offset (required buffer size)
 * is accumulated, allowing a sizing dry-run before allocation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
/*
 * NOTE(review): this excerpt is missing a number of lines (return
 * statements, some conditions and braces); the structure below is
 * indicative only — consult the complete source.
 */
priv_flow_validate(struct priv *priv,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error,
		   struct mlx4_flow *flow)
	const struct mlx4_flow_items *cur_item = mlx4_flow_items;
	struct mlx4_flow_action action = {
	/* Only the default group, priority 0, ingress-only are supported. */
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			   "groups are not supported");
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   "priorities are not supported");
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			   "egress is not supported");
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   "only ingress is supported");
	/* Go over items list. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		const struct mlx4_flow_items *token = NULL;
		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
		/*
		 * The nic can support patterns with NULL eth spec only
		 * if eth is a single item in a rule.
		 */
		    items->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item *next = items + 1;
			if (next->type != RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   " an Ethernet spec");
		/* Look up the current item among the previous item's successors. */
		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
			if (cur_item->items[i] == items->type) {
				token = &mlx4_flow_items[items->type];
			goto exit_item_not_supported;
		err = cur_item->validate(items,
					 (const uint8_t *)cur_item->mask,
			goto exit_item_not_supported;
		/* Conversion pass: only when a destination buffer exists. */
		if (flow->ibv_attr && cur_item->convert) {
			err = cur_item->convert(items,
						(cur_item->default_mask ?
						 cur_item->default_mask :
				goto exit_item_not_supported;
		flow->offset += cur_item->dst_sz;
	/* Go over actions list */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
			if (!queue || (queue->index > (priv->rxqs_n - 1)))
				goto exit_action_not_supported;
			goto exit_action_not_supported;
	/* At least one terminating action (queue or drop) is mandatory. */
	if (!action.queue && !action.drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   items, "item not supported");
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
694 * Validate a flow supported by the NIC.
696 * @see rte_flow_validate()
700 mlx4_flow_validate(struct rte_eth_dev *dev,
701 const struct rte_flow_attr *attr,
702 const struct rte_flow_item items[],
703 const struct rte_flow_action actions[],
704 struct rte_flow_error *error)
706 struct priv *priv = dev->data->dev_private;
707 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
709 return priv_flow_validate(priv, attr, items, actions, error, &flow);
713 * Destroy a drop queue.
716 * Pointer to private structure.
719 mlx4_flow_destroy_drop_queue(struct priv *priv)
721 if (priv->flow_drop_queue) {
722 struct rte_flow_drop *fdq = priv->flow_drop_queue;
724 priv->flow_drop_queue = NULL;
725 claim_zero(ibv_destroy_qp(fdq->qp));
726 claim_zero(ibv_destroy_cq(fdq->cq));
/**
 * Create a single drop queue for all drop flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
/*
 * NOTE(review): the error-unwind labels, returns and several declarations
 * (qp/cq locals, QP attribute fields) are missing from this excerpt; the
 * trailing ibv_destroy_cq() below is presumably the QP-failure unwind path.
 */
mlx4_flow_create_drop_queue(struct priv *priv)
	struct rte_flow_drop *fdq;

	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
		ERROR("Cannot allocate memory for drop struct");
	/* A 1-entry CQ is enough: the drop QP never delivers completions. */
	cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
		ERROR("Cannot create drop CQ");
	qp = ibv_create_qp(priv->pd,
			   &(struct ibv_qp_init_attr){
			.qp_type = IBV_QPT_RAW_PACKET,
		ERROR("Cannot create drop QP");
	*fdq = (struct rte_flow_drop){
	priv->flow_drop_queue = fdq;
	claim_zero(ibv_destroy_cq(cq));
/**
 * Complete flow rule creation.
 *
 * Allocates the rte_flow container, resolves the target Verbs QP (the drop
 * QP or the selected Rx queue's QP) and installs the rule via
 * ibv_create_flow().
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param action
 *   Target action structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
/*
 * NOTE(review): error-unwind paths, the QUEUE-action branch body and the
 * final return are missing from this excerpt.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
			      struct ibv_flow_attr *ibv_attr,
			      struct mlx4_flow_action *action,
			      struct rte_flow_error *error)
	struct rte_flow *rte_flow;

	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
	/* Drop action: target the shared drop QP when it exists. */
	qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
		struct rxq *rxq = (*priv->rxqs)[action->queue_id];
	rte_flow->ibv_attr = ibv_attr;
	rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
/**
 * Create a flow rule.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
/*
 * NOTE(review): error checks, the validation-only early return and some
 * braces are missing from this excerpt. The visible structure is a
 * two-pass scheme: a first priv_flow_validate() call sizes the Verbs
 * attribute buffer via flow.offset, then after allocation a second call
 * performs the actual item-to-spec conversion.
 */
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *rte_flow;
	struct mlx4_flow_action action;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };

	/* First pass: validate and compute required buffer size. */
	err = priv_flow_validate(priv, attr, items, actions, error, &flow);
	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
	if (!flow.ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
	/* Reset the write offset to just past the attribute header. */
	flow.offset = sizeof(struct ibv_flow_attr);
	*flow.ibv_attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_flow_attr),
		.priority = attr->priority,
	/* Second pass: already validated above, so failure is impossible. */
	claim_zero(priv_flow_validate(priv, attr, items, actions,
	action = (struct mlx4_flow_action){
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			((const struct rte_flow_action_queue *)
			 actions->conf)->index;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions, "unsupported action");
	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
	LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
	DEBUG("Flow created %p", (void *)rte_flow);
	rte_free(flow.ibv_attr);
/**
 * @see rte_flow_isolate()
 *
 * Must be done before calling dev_configure().
 *
 * @param dev
 *   Pointer to the ethernet device structure.
 * @param enable
 *   Nonzero to enter isolated mode, attempt to leave it otherwise.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative value on error.
 */
/*
 * NOTE(review): the "int enable" parameter line, the condition guarding the
 * error path below and the returns are missing from this excerpt.
 */
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  struct rte_flow_error *error)
	struct priv *priv = dev->data->dev_private;

	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, "isolated mode must be set"
			   " before configuring the device");
	priv->isolated = !!enable;
950 * @see rte_flow_destroy()
954 mlx4_flow_destroy(struct rte_eth_dev *dev,
955 struct rte_flow *flow,
956 struct rte_flow_error *error)
960 LIST_REMOVE(flow, next);
962 claim_zero(ibv_destroy_flow(flow->ibv_flow));
963 rte_free(flow->ibv_attr);
964 DEBUG("Flow destroyed %p", (void *)flow);
972 * @see rte_flow_flush()
976 mlx4_flow_flush(struct rte_eth_dev *dev,
977 struct rte_flow_error *error)
979 struct priv *priv = dev->data->dev_private;
981 while (!LIST_EMPTY(&priv->flows)) {
982 struct rte_flow *flow;
984 flow = LIST_FIRST(&priv->flows);
985 mlx4_flow_destroy(dev, flow, error);
993 * Called by dev_stop() to remove all flows.
996 * Pointer to private structure.
999 mlx4_priv_flow_stop(struct priv *priv)
1001 struct rte_flow *flow;
1003 for (flow = LIST_FIRST(&priv->flows);
1005 flow = LIST_NEXT(flow, next)) {
1006 claim_zero(ibv_destroy_flow(flow->ibv_flow));
1007 flow->ibv_flow = NULL;
1008 DEBUG("Flow %p removed", (void *)flow);
1010 mlx4_flow_destroy_drop_queue(priv);
1017 * Pointer to private structure.
1020 * 0 on success, a errno value otherwise and rte_errno is set.
1023 mlx4_priv_flow_start(struct priv *priv)
1027 struct rte_flow *flow;
1029 ret = mlx4_flow_create_drop_queue(priv);
1032 for (flow = LIST_FIRST(&priv->flows);
1034 flow = LIST_NEXT(flow, next)) {
1035 qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
1036 flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
1037 if (!flow->ibv_flow) {
1038 DEBUG("Flow %p cannot be applied", (void *)flow);
1042 DEBUG("Flow %p applied", (void *)flow);