/*-
 *   BSD LICENSE
 *
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
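
/*
 * For instance, NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * yields a compound literal equivalent to:
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 */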

/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/**
	 * Default bit-masks to use when item->mask is not provided. When
	 * \default_mask is also NULL, the full supported bit-mask (\mask) is
	 * used instead.
	 */
	const void *default_mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Check support for a given item.
	 *
	 * @param item
	 *   Item specification.
	 * @param mask
	 *   Bit-masks covering supported fields to compare with spec,
	 *   last and mask in \item.
	 * @param size
	 *   Bit-Mask size in bytes.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*validate)(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size);
	/**
	 * Conversion function from rte_flow to NIC specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible subsequent items. */
	const enum rte_flow_item_type *const next_item;
};
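
/*
 * Note: mlx4_flow_prepare() below walks a pattern through these handlers
 * twice: a validation pass where flow->ibv_attr is still NULL (only
 * ->validate runs while ->dst_sz accumulates the Verbs buffer size), then
 * a conversion pass where ->convert fills the allocated attributes.
 */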

/** Structure for Drop queue. */
struct rte_flow_drop {
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
};

/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
	unsigned int i;

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 2;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*eth = (struct ibv_flow_spec_eth) {
		.type = IBV_FLOW_SPEC_ETH,
		.size = eth_size,
	};
	if (!spec) {
		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
		return 0;
	}
	if (!mask)
		mask = default_mask;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
	}
	return 0;
}

/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_vlan(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	eth->val.vlan_tag = spec->tci;
	eth->mask.vlan_tag = mask->tci;
	eth->val.vlan_tag &= eth->mask.vlan_tag;
	return 0;
}
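
/*
 * Note: VLAN matching does not consume a Verbs spec of its own; the TCI is
 * folded into the Ethernet spec written just before it, hence the
 * (flow->offset - eth_size) arithmetic above and the null dst_sz of the
 * VLAN entry in mlx4_flow_proc_item_list.
 */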

/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_ipv4 *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 1;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*ipv4 = (struct ibv_flow_spec_ipv4) {
		.type = IBV_FLOW_SPEC_IPV4,
		.size = ipv4_size,
	};
	if (!spec)
		return 0;
	ipv4->val = (struct ibv_flow_ipv4_filter) {
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	if (!mask)
		mask = default_mask;
	ipv4->mask = (struct ibv_flow_ipv4_filter) {
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
}

/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*udp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_UDP,
		.size = udp_size,
	};
	if (!spec)
		return 0;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	if (!mask)
		mask = default_mask;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
}

/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx4_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx4_flow *flow = (struct mlx4_flow *)data;
	struct ibv_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*tcp = (struct ibv_flow_spec_tcp_udp) {
		.type = IBV_FLOW_SPEC_TCP,
		.size = tcp_size,
	};
	if (!spec)
		return 0;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	if (!mask)
		mask = default_mask;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
}
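
/*
 * Note: the Verbs priority set by the handlers above (2 for L2, 1 for
 * IPv4, 0 for TCP/UDP) is meant to rank overlapping rules so that more
 * specific matches take precedence over broader ones.
 */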

/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-Mask size in bytes.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_item_validate(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	int ret = 0;

	if (!item->spec && (item->mask || item->last))
		return -1;
	if (item->spec && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->spec;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->last && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->last;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		const uint8_t *apply = mask;
		unsigned int i;

		if (item->mask)
			apply = item->mask;
		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
		}
		ret = memcmp(spec, last, size);
	}
	return ret;
}
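
/*
 * For example, with an IPv4 item where spec->hdr.dst_addr is 10.0.0.1,
 * last->hdr.dst_addr is 10.0.0.9 and the mask is 255.255.255.0, both
 * bounds collapse to 10.0.0.0 after masking, so the memcmp() above
 * succeeds: ranges are only accepted when the mask hides them.
 */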

static int
mlx4_flow_validate_eth(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_eth *mask = item->mask;

		if (mask->dst.addr_bytes[0] != 0xff ||
				mask->dst.addr_bytes[1] != 0xff ||
				mask->dst.addr_bytes[2] != 0xff ||
				mask->dst.addr_bytes[3] != 0xff ||
				mask->dst.addr_bytes[4] != 0xff ||
				mask->dst.addr_bytes[5] != 0xff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_vlan(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_vlan *mask = item->mask;

		if (mask->tci != 0 &&
		    ntohs(mask->tci) != 0x0fff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_ipv4 *mask = item->mask;

		if (mask->hdr.src_addr != 0 &&
		    mask->hdr.src_addr != 0xffffffff)
			return -1;
		if (mask->hdr.dst_addr != 0 &&
		    mask->hdr.dst_addr != 0xffffffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_udp(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_udp *mask = item->mask;

		if (mask->hdr.src_port != 0 &&
		    mask->hdr.src_port != 0xffff)
			return -1;
		if (mask->hdr.dst_port != 0 &&
		    mask->hdr.dst_port != 0xffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_tcp(const struct rte_flow_item *item,
		       const uint8_t *mask, unsigned int size)
{
	if (item->mask) {
		const struct rte_flow_item_tcp *mask = item->mask;

		if (mask->hdr.src_port != 0 &&
		    mask->hdr.src_port != 0xffff)
			return -1;
		if (mask->hdr.dst_port != 0 &&
		    mask->hdr.dst_port != 0xffff)
			return -1;
	}
	return mlx4_flow_item_validate(item, mask, size);
}

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
				       RTE_FLOW_ITEM_TYPE_IPV4),
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.default_mask = &rte_flow_item_eth_mask,
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.validate = mlx4_flow_validate_eth,
		.convert = mlx4_flow_create_eth,
		.dst_sz = sizeof(struct ibv_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
		.mask = &(const struct rte_flow_item_vlan){
			/* rte_flow_item_vlan_mask is invalid for mlx4. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			.tci = 0x0fff,
#else
			.tci = 0xff0f,
#endif
		},
		.mask_sz = sizeof(struct rte_flow_item_vlan),
		.validate = mlx4_flow_validate_vlan,
		.convert = mlx4_flow_create_vlan,
		.dst_sz = 0,
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
				       RTE_FLOW_ITEM_TYPE_TCP),
		.mask = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = -1,
				.dst_addr = -1,
			},
		},
		.default_mask = &rte_flow_item_ipv4_mask,
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.validate = mlx4_flow_validate_ipv4,
		.convert = mlx4_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.mask = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_udp_mask,
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.validate = mlx4_flow_validate_udp,
		.convert = mlx4_flow_create_udp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.mask = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.default_mask = &rte_flow_item_tcp_mask,
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.validate = mlx4_flow_validate_tcp,
		.convert = mlx4_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
	},
};
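
/*
 * The END entry seeds the graph: a pattern must start with ETH, may then
 * continue with VLAN -> IPV4 or IPV4 directly, and may end with UDP or
 * TCP; UDP and TCP are leaves since they declare no next_item.
 */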

/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error,
		  struct mlx4_flow *flow)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	const struct mlx4_flow_proc_item *proc = mlx4_flow_proc_item_list;
	struct mlx4_flow_target target = {
		.queue = 0,
		.drop = 0,
	};
	uint32_t priority_override = 0;

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (priv->isolated) {
		priority_override = attr->priority;
	} else if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported outside"
				   " isolated mode");
		return -rte_errno;
	}
	if (attr->priority > MLX4_FLOW_PRIORITY_LAST) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "maximum priority level is "
				   MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
	/* Go over pattern. */
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
		const struct mlx4_flow_proc_item *next = NULL;
		unsigned int i;
		int err;

		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		/*
		 * The NIC can support patterns with NULL Ethernet spec only
		 * if such a spec is the only item in the rule.
		 */
		if (!item->spec && item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item *next = item + 1;

			if (next->type != RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "the rule requires"
						   " an Ethernet spec");
				return -rte_errno;
			}
		}
		for (i = 0;
		     proc->next_item &&
		     proc->next_item[i] != RTE_FLOW_ITEM_TYPE_END;
		     ++i) {
			if (proc->next_item[i] == item->type) {
				next = &mlx4_flow_proc_item_list[item->type];
				break;
			}
		}
		if (!next)
			goto exit_item_not_supported;
		proc = next;
		err = proc->validate(item, proc->mask, proc->mask_sz);
		if (err)
			goto exit_item_not_supported;
		if (flow->ibv_attr && proc->convert) {
			err = proc->convert(item,
					    (proc->default_mask ?
					     proc->default_mask :
					     proc->mask),
					    flow);
			if (err)
				goto exit_item_not_supported;
		}
		flow->offset += proc->dst_sz;
	}
	/* Use specified priority level when in isolated mode. */
	if (priv->isolated && flow->ibv_attr)
		flow->ibv_attr->priority = priority_override;
	/* Go over actions list. */
	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END;
	     ++action) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
			target.drop = 1;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				action->conf;

			if (!queue || (queue->index >
				       (priv->dev->data->nb_rx_queues - 1)))
				goto exit_action_not_supported;
			target.queue = 1;
		} else {
			goto exit_action_not_supported;
		}
	}
	if (!target.queue && !target.drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	return 0;
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   item, "item not supported");
	return -rte_errno;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   action, "action not supported");
	return -rte_errno;
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };

	return mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
}
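
/*
 * Note: flow.ibv_attr is deliberately left NULL above so that
 * mlx4_flow_prepare() only validates the rule without converting it.
 */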

/**
 * Destroy a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_flow_destroy_drop_queue(struct priv *priv)
{
	if (priv->flow_drop_queue) {
		struct rte_flow_drop *fdq = priv->flow_drop_queue;

		priv->flow_drop_queue = NULL;
		claim_zero(ibv_destroy_qp(fdq->qp));
		claim_zero(ibv_destroy_cq(fdq->cq));
		rte_free(fdq);
	}
}

/**
 * Create a single drop queue for all drop flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_create_drop_queue(struct priv *priv)
{
	struct ibv_qp *qp;
	struct ibv_cq *cq;
	struct rte_flow_drop *fdq;

	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
	if (!fdq) {
		ERROR("Cannot allocate memory for drop struct");
		goto err;
	}
	cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
	if (!cq) {
		ERROR("Cannot create drop CQ");
		goto err_create_cq;
	}
	qp = ibv_create_qp(priv->pd,
			   &(struct ibv_qp_init_attr){
				.send_cq = cq,
				.recv_cq = cq,
				.cap = {
					.max_recv_wr = 1,
					.max_recv_sge = 1,
				},
				.qp_type = IBV_QPT_RAW_PACKET,
			   });
	if (!qp) {
		ERROR("Cannot create drop QP");
		goto err_create_qp;
	}
	*fdq = (struct rte_flow_drop){
		.qp = qp,
		.cq = cq,
	};
	priv->flow_drop_queue = fdq;
	return 0;
err_create_qp:
	claim_zero(ibv_destroy_cq(cq));
err_create_cq:
	rte_free(fdq);
err:
	return -1;
}
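
/*
 * Note: no receive work requests are ever posted on this QP, so packets
 * steered to it by drop rules are simply discarded; a minimal one-entry
 * CQ and QP are therefore sufficient.
 */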

/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param target
 *   Rule target descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
mlx4_flow_create_target_queue(struct priv *priv,
			      struct ibv_flow_attr *ibv_attr,
			      struct mlx4_flow_target *target,
			      struct rte_flow_error *error)
{
	struct ibv_qp *qp;
	struct rte_flow *rte_flow;

	assert(priv->pd);
	assert(priv->ctx);
	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	if (target->drop) {
		qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
	} else {
		struct rxq *rxq = priv->dev->data->rx_queues[target->queue_id];

		qp = rxq->qp;
		rte_flow->qp = qp;
	}
	rte_flow->ibv_attr = ibv_attr;
	if (!priv->started)
		return rte_flow;
	rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	rte_free(rte_flow);
	return NULL;
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	const struct rte_flow_action *action;
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *rte_flow;
	struct mlx4_flow_target target;
	struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
	int err;

	err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
	if (err)
		return NULL;
	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
	if (!flow.ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
		return NULL;
	}
	flow.offset = sizeof(struct ibv_flow_attr);
	*flow.ibv_attr = (struct ibv_flow_attr){
		.comp_mask = 0,
		.type = IBV_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_flow_attr),
		.priority = attr->priority,
		.num_of_specs = 0,
		.port = priv->port,
		.flags = 0,
	};
	claim_zero(mlx4_flow_prepare(priv, attr, pattern, actions,
				     error, &flow));
	target = (struct mlx4_flow_target){
		.queue = 0,
		.drop = 0,
	};
	for (action = actions;
	     action->type != RTE_FLOW_ACTION_TYPE_END;
	     ++action) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			target.queue = 1;
			target.queue_id =
				((const struct rte_flow_action_queue *)
				 action->conf)->index;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
			target.drop = 1;
		} else {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   action, "unsupported action");
			goto exit;
		}
	}
	rte_flow = mlx4_flow_create_target_queue(priv, flow.ibv_attr,
						 &target, error);
	if (rte_flow) {
		LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
		DEBUG("Flow created %p", (void *)rte_flow);
		return rte_flow;
	}
exit:
	rte_free(flow.ibv_attr);
	return NULL;
}

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (!!enable == !!priv->isolated)
		return 0;
	priv->isolated = !!enable;
	if (enable) {
		mlx4_mac_addr_del(priv);
	} else if (mlx4_mac_addr_add(priv) < 0) {
		priv->isolated = 1;
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot leave isolated mode");
	}
	return 0;
}

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	(void)dev;
	(void)error;
	LIST_REMOVE(flow, next);
	if (flow->ibv_flow)
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
	rte_free(flow->ibv_attr);
	DEBUG("Flow destroyed %p", (void *)flow);
	rte_free(flow);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	while (!LIST_EMPTY(&priv->flows)) {
		struct rte_flow *flow;

		flow = LIST_FIRST(&priv->flows);
		mlx4_flow_destroy(dev, flow, error);
	}
	return 0;
}

/**
 * Remove all flows.
 *
 * Called by dev_stop() to remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_stop(struct priv *priv)
{
	struct rte_flow *flow;

	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		claim_zero(ibv_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		DEBUG("Flow %p removed", (void *)flow);
	}
	mlx4_flow_destroy_drop_queue(priv);
}

/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_start(struct priv *priv)
{
	int ret;
	struct ibv_qp *qp;
	struct rte_flow *flow;

	ret = mlx4_flow_create_drop_queue(priv);
	if (ret)
		return -1;
	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
		flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
		if (!flow->ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
		DEBUG("Flow %p applied", (void *)flow);
	}
	return 0;
}

static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};
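
/*
 * Illustrative sketch (not part of this driver): rte_flow_create() and
 * friends resolve this structure through the generic filter API, e.g.:
 *
 *   const struct rte_flow_ops *ops;
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops);
 *
 * before dispatching to ops->create(), which mlx4_filter_ctrl() below
 * serves.
 */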

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
		 enum rte_filter_type filter_type,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			break;
		*(const void **)arg = &mlx4_flow_ops;
		return 0;
	default:
		ERROR("%p: filter type (%d) not supported",
		      (void *)dev, filter_type);
		break;
	}
	rte_errno = ENOTSUP;
	return -rte_errno;
}