4 * Copyright 2017 6WIND S.A.
5 * Copyright 2017 Mellanox
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Flow API operations for mlx4 driver.
39 #include <arpa/inet.h>
45 #include <sys/queue.h>
47 /* Verbs headers do not support -pedantic. */
49 #pragma GCC diagnostic ignored "-Wpedantic"
51 #include <infiniband/verbs.h>
53 #pragma GCC diagnostic error "-Wpedantic"
56 #include <rte_byteorder.h>
57 #include <rte_errno.h>
58 #include <rte_eth_ctrl.h>
59 #include <rte_ethdev.h>
61 #include <rte_flow_driver.h>
62 #include <rte_malloc.h>
66 #include "mlx4_flow.h"
67 #include "mlx4_rxtx.h"
68 #include "mlx4_utils.h"
70 /** Static initializer for a list of subsequent item types. */
/*
 * Builds an anonymous, statically-allocated array of item types terminated
 * by RTE_FLOW_ITEM_TYPE_END, for use as .next_item initializers below.
 * NOTE(review): closing line of this macro is elided in this excerpt.
 */
71 #define NEXT_ITEM(...) \
72 (const enum rte_flow_item_type []){ \
73 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
76 /** Processor structure associated with a flow item. */
77 struct mlx4_flow_proc_item {
78 /** Bit-masks corresponding to the possibilities for the item. */
81 * Default bit-masks to use when item->mask is not provided. When
82 * \default_mask is also NULL, the full supported bit-mask (\mask) is
85 const void *default_mask;
86 /** Bit-masks size in bytes. */
87 const unsigned int mask_sz;
89 * Check support for a given item.
94 * Bit-masks covering supported fields to compare with spec,
98 * Bit-Mask size in bytes.
101 * 0 on success, negative value otherwise.
103 int (*validate)(const struct rte_flow_item *item,
104 const uint8_t *mask, unsigned int size);
106 * Conversion function from rte_flow to NIC specific flow.
109 * rte_flow item to convert.
110 * @param default_mask
111 * Default bit-masks to use when item->mask is not provided.
113 * Internal structure to store the conversion.
116 * 0 on success, negative value otherwise.
118 int (*convert)(const struct rte_flow_item *item,
119 const void *default_mask,
120 struct mlx4_flow *flow);
121 /** Size in bytes of the destination structure. */
/* dst_sz is the Verbs spec size this item contributes to flow->offset. */
122 const unsigned int dst_sz;
123 /** List of possible subsequent items. */
124 const enum rte_flow_item_type *const next_item;
/* NOTE(review): the `mask` field declaration and closing `};` appear to be
 * elided from this excerpt. */
/* Shared drop target: a dead-end QP/CQ pair that all DROP rules attach to. */
127 struct rte_flow_drop {
128 struct ibv_qp *qp; /**< Verbs queue pair. */
129 struct ibv_cq *cq; /**< Verbs completion queue. */
133 * Convert Ethernet item to Verbs specification.
136 * Item specification.
137 * @param default_mask[in]
138 * Default bit-masks to use when item->mask is not provided.
139 * @param flow[in, out]
/*
 * Append an IBV_FLOW_SPEC_ETH spec at flow->offset from the supplied item.
 * NOTE(review): several interior lines (braces, spec/mask fallbacks, return)
 * are elided in this excerpt; comments below describe only visible code.
 */
143 mlx4_flow_create_eth(const struct rte_flow_item *item,
144 const void *default_mask,
145 struct mlx4_flow *flow)
147 const struct rte_flow_item_eth *spec = item->spec;
148 const struct rte_flow_item_eth *mask = item->mask;
149 struct ibv_flow_spec_eth *eth;
150 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
/* Each converted item bumps the spec count; priority 2 = L2 match. */
153 ++flow->ibv_attr->num_of_specs;
154 flow->ibv_attr->priority = 2;
155 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
156 *eth = (struct ibv_flow_spec_eth) {
157 .type = IBV_FLOW_SPEC_ETH,
/* Presumably taken when spec is NULL (catch-all Ethernet) — TODO confirm
 * against the elided branch. */
161 flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
166 memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
167 memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
168 memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
169 memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
170 /* Remove unwanted bits from values. */
171 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
172 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
173 eth->val.src_mac[i] &= eth->mask.src_mac[i];
179 * Convert VLAN item to Verbs specification.
182 * Item specification.
183 * @param default_mask[in]
184 * Default bit-masks to use when item->mask is not provided.
185 * @param flow[in, out]
/*
 * Merge VLAN TCI matching into the *previous* Ethernet spec (note the
 * `- eth_size` offset): VLAN does not emit its own Verbs spec.
 * NOTE(review): braces and spec/mask NULL handling are elided here.
 */
189 mlx4_flow_create_vlan(const struct rte_flow_item *item,
190 const void *default_mask,
191 struct mlx4_flow *flow)
193 const struct rte_flow_item_vlan *spec = item->spec;
194 const struct rte_flow_item_vlan *mask = item->mask;
195 struct ibv_flow_spec_eth *eth;
196 const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
/* Step back onto the eth spec written by the preceding ETH item. */
198 eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
203 eth->val.vlan_tag = spec->tci;
204 eth->mask.vlan_tag = mask->tci;
205 eth->val.vlan_tag &= eth->mask.vlan_tag;
210 * Convert IPv4 item to Verbs specification.
213 * Item specification.
214 * @param default_mask[in]
215 * Default bit-masks to use when item->mask is not provided.
216 * @param flow[in, out]
/*
 * Append an IBV_FLOW_SPEC_IPV4 spec at flow->offset (src/dst address only).
 * NOTE(review): braces and the NULL spec/mask guards are elided here.
 */
220 mlx4_flow_create_ipv4(const struct rte_flow_item *item,
221 const void *default_mask,
222 struct mlx4_flow *flow)
224 const struct rte_flow_item_ipv4 *spec = item->spec;
225 const struct rte_flow_item_ipv4 *mask = item->mask;
226 struct ibv_flow_spec_ipv4 *ipv4;
227 unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
/* Priority 1 = L3 match, between L2 (2) and L4 (0). */
229 ++flow->ibv_attr->num_of_specs;
230 flow->ibv_attr->priority = 1;
231 ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
232 *ipv4 = (struct ibv_flow_spec_ipv4) {
233 .type = IBV_FLOW_SPEC_IPV4,
238 ipv4->val = (struct ibv_flow_ipv4_filter) {
239 .src_ip = spec->hdr.src_addr,
240 .dst_ip = spec->hdr.dst_addr,
244 ipv4->mask = (struct ibv_flow_ipv4_filter) {
245 .src_ip = mask->hdr.src_addr,
246 .dst_ip = mask->hdr.dst_addr,
248 /* Remove unwanted bits from values. */
249 ipv4->val.src_ip &= ipv4->mask.src_ip;
250 ipv4->val.dst_ip &= ipv4->mask.dst_ip;
255 * Convert UDP item to Verbs specification.
258 * Item specification.
259 * @param default_mask[in]
260 * Default bit-masks to use when item->mask is not provided.
261 * @param flow[in, out]
/*
 * Append an IBV_FLOW_SPEC_UDP spec at flow->offset (src/dst port only).
 * NOTE(review): braces and the NULL spec/mask guards are elided here.
 */
265 mlx4_flow_create_udp(const struct rte_flow_item *item,
266 const void *default_mask,
267 struct mlx4_flow *flow)
269 const struct rte_flow_item_udp *spec = item->spec;
270 const struct rte_flow_item_udp *mask = item->mask;
271 struct ibv_flow_spec_tcp_udp *udp;
272 unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
/* Priority 0 = most specific (L4) match. */
274 ++flow->ibv_attr->num_of_specs;
275 flow->ibv_attr->priority = 0;
276 udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
277 *udp = (struct ibv_flow_spec_tcp_udp) {
278 .type = IBV_FLOW_SPEC_UDP,
283 udp->val.dst_port = spec->hdr.dst_port;
284 udp->val.src_port = spec->hdr.src_port;
287 udp->mask.dst_port = mask->hdr.dst_port;
288 udp->mask.src_port = mask->hdr.src_port;
289 /* Remove unwanted bits from values. */
290 udp->val.src_port &= udp->mask.src_port;
291 udp->val.dst_port &= udp->mask.dst_port;
296 * Convert TCP item to Verbs specification.
299 * Item specification.
300 * @param default_mask[in]
301 * Default bit-masks to use when item->mask is not provided.
302 * @param flow[in, out]
/*
 * Append an IBV_FLOW_SPEC_TCP spec at flow->offset; mirrors the UDP
 * converter above except for the spec type.
 * NOTE(review): braces and the NULL spec/mask guards are elided here.
 */
306 mlx4_flow_create_tcp(const struct rte_flow_item *item,
307 const void *default_mask,
308 struct mlx4_flow *flow)
310 const struct rte_flow_item_tcp *spec = item->spec;
311 const struct rte_flow_item_tcp *mask = item->mask;
312 struct ibv_flow_spec_tcp_udp *tcp;
313 unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
315 ++flow->ibv_attr->num_of_specs;
316 flow->ibv_attr->priority = 0;
317 tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
318 *tcp = (struct ibv_flow_spec_tcp_udp) {
319 .type = IBV_FLOW_SPEC_TCP,
324 tcp->val.dst_port = spec->hdr.dst_port;
325 tcp->val.src_port = spec->hdr.src_port;
328 tcp->mask.dst_port = mask->hdr.dst_port;
329 tcp->mask.src_port = mask->hdr.src_port;
330 /* Remove unwanted bits from values. */
331 tcp->val.src_port &= tcp->mask.src_port;
332 tcp->val.dst_port &= tcp->mask.dst_port;
337 * Check support for a given item.
340 * Item specification.
342 * Bit-masks covering supported fields to compare with spec, last and mask in
345 * Bit-Mask size in bytes.
348 * 0 on success, negative value otherwise.
/*
 * Generic item validation: reject spec-less mask/last, reject bits outside
 * the supported mask, and (for ranges) require spec == last once both are
 * masked — i.e. ranges that differ under the mask are unsupported.
 * NOTE(review): braces, returns, and the spec[]/last[] declarations are
 * elided in this excerpt.
 */
351 mlx4_flow_item_validate(const struct rte_flow_item *item,
352 const uint8_t *mask, unsigned int size)
356 if (!item->spec && (item->mask || item->last))
358 if (item->spec && !item->mask) {
360 const uint8_t *spec = item->spec;
/* Any spec bit not covered by the supported mask is unsupported. */
362 for (i = 0; i < size; ++i)
363 if ((spec[i] | mask[i]) != mask[i])
366 if (item->last && !item->mask) {
368 const uint8_t *spec = item->last;
370 for (i = 0; i < size; ++i)
371 if ((spec[i] | mask[i]) != mask[i])
374 if (item->spec && item->last) {
/* Presumably item->mask overrides `apply` when provided — TODO confirm
 * against the elided lines. */
377 const uint8_t *apply = mask;
382 for (i = 0; i < size; ++i) {
383 spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
384 last[i] = ((const uint8_t *)item->last)[i] & apply[i];
386 ret = memcmp(spec, last, size);
/*
 * ETH item validation: the hardware only supports full (all-ones)
 * destination MAC matching; anything partial is rejected before the
 * generic check. NOTE(review): the enclosing `if (item->mask)` guard,
 * braces, and `return -1;` are elided in this excerpt; the inner `mask`
 * shadows the parameter there.
 */
392 mlx4_flow_validate_eth(const struct rte_flow_item *item,
393 const uint8_t *mask, unsigned int size)
396 const struct rte_flow_item_eth *mask = item->mask;
398 if (mask->dst.addr_bytes[0] != 0xff ||
399 mask->dst.addr_bytes[1] != 0xff ||
400 mask->dst.addr_bytes[2] != 0xff ||
401 mask->dst.addr_bytes[3] != 0xff ||
402 mask->dst.addr_bytes[4] != 0xff ||
403 mask->dst.addr_bytes[5] != 0xff)
406 return mlx4_flow_item_validate(item, mask, size);
/*
 * VLAN item validation: TCI mask must be either 0 (ignore) or the full
 * 12-bit VID (0x0fff); partial VID masks are unsupported.
 * NOTE(review): guard braces and `return -1;` are elided in this excerpt.
 */
410 mlx4_flow_validate_vlan(const struct rte_flow_item *item,
411 const uint8_t *mask, unsigned int size)
414 const struct rte_flow_item_vlan *mask = item->mask;
416 if (mask->tci != 0 &&
417 ntohs(mask->tci) != 0x0fff)
420 return mlx4_flow_item_validate(item, mask, size);
/*
 * IPv4 item validation: src/dst address masks must be all-or-nothing
 * (0 or 0xffffffff); prefix masks are unsupported.
 * NOTE(review): guard braces and `return -1;` are elided in this excerpt.
 */
424 mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
425 const uint8_t *mask, unsigned int size)
428 const struct rte_flow_item_ipv4 *mask = item->mask;
430 if (mask->hdr.src_addr != 0 &&
431 mask->hdr.src_addr != 0xffffffff)
433 if (mask->hdr.dst_addr != 0 &&
434 mask->hdr.dst_addr != 0xffffffff)
437 return mlx4_flow_item_validate(item, mask, size);
/*
 * UDP item validation: port masks must be all-or-nothing (0 or 0xffff).
 * NOTE(review): guard braces and `return -1;` are elided in this excerpt.
 */
441 mlx4_flow_validate_udp(const struct rte_flow_item *item,
442 const uint8_t *mask, unsigned int size)
445 const struct rte_flow_item_udp *mask = item->mask;
447 if (mask->hdr.src_port != 0 &&
448 mask->hdr.src_port != 0xffff)
450 if (mask->hdr.dst_port != 0 &&
451 mask->hdr.dst_port != 0xffff)
454 return mlx4_flow_item_validate(item, mask, size);
/*
 * TCP item validation: identical constraints to UDP — port masks must be
 * all-or-nothing (0 or 0xffff).
 * NOTE(review): guard braces and `return -1;` are elided in this excerpt.
 */
458 mlx4_flow_validate_tcp(const struct rte_flow_item *item,
459 const uint8_t *mask, unsigned int size)
462 const struct rte_flow_item_tcp *mask = item->mask;
464 if (mask->hdr.src_port != 0 &&
465 mask->hdr.src_port != 0xffff)
467 if (mask->hdr.dst_port != 0 &&
468 mask->hdr.dst_port != 0xffff)
471 return mlx4_flow_item_validate(item, mask, size);
474 /** Graph of supported items and associated actions. */
474 /** Graph of supported items and associated actions. */
/*
 * Indexed by rte_flow_item_type. END is the entry node; next_item encodes
 * the legal item ordering ETH -> [VLAN] -> IPV4 -> {UDP|TCP}. `mask` is the
 * superset of matchable fields, `default_mask` is applied when the user
 * supplies none. NOTE(review): some closing braces/fields appear elided.
 */
475 static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
476 [RTE_FLOW_ITEM_TYPE_END] = {
477 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
479 [RTE_FLOW_ITEM_TYPE_ETH] = {
480 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
481 RTE_FLOW_ITEM_TYPE_IPV4),
482 .mask = &(const struct rte_flow_item_eth){
483 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
484 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
486 .default_mask = &rte_flow_item_eth_mask,
487 .mask_sz = sizeof(struct rte_flow_item_eth),
488 .validate = mlx4_flow_validate_eth,
489 .convert = mlx4_flow_create_eth,
490 .dst_sz = sizeof(struct ibv_flow_spec_eth),
492 [RTE_FLOW_ITEM_TYPE_VLAN] = {
493 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
494 .mask = &(const struct rte_flow_item_vlan){
495 /* Only TCI VID matching is supported. */
496 .tci = RTE_BE16(0x0fff),
/* No .default_mask and no .dst_sz here: VLAN piggybacks on the eth spec
 * (see mlx4_flow_create_vlan) so it adds no destination bytes. */
498 .mask_sz = sizeof(struct rte_flow_item_vlan),
499 .validate = mlx4_flow_validate_vlan,
500 .convert = mlx4_flow_create_vlan,
503 [RTE_FLOW_ITEM_TYPE_IPV4] = {
504 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
505 RTE_FLOW_ITEM_TYPE_TCP),
506 .mask = &(const struct rte_flow_item_ipv4){
508 .src_addr = RTE_BE32(0xffffffff),
509 .dst_addr = RTE_BE32(0xffffffff),
512 .default_mask = &rte_flow_item_ipv4_mask,
513 .mask_sz = sizeof(struct rte_flow_item_ipv4),
514 .validate = mlx4_flow_validate_ipv4,
515 .convert = mlx4_flow_create_ipv4,
516 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
518 [RTE_FLOW_ITEM_TYPE_UDP] = {
519 .mask = &(const struct rte_flow_item_udp){
521 .src_port = RTE_BE16(0xffff),
522 .dst_port = RTE_BE16(0xffff),
525 .default_mask = &rte_flow_item_udp_mask,
526 .mask_sz = sizeof(struct rte_flow_item_udp),
527 .validate = mlx4_flow_validate_udp,
528 .convert = mlx4_flow_create_udp,
529 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
531 [RTE_FLOW_ITEM_TYPE_TCP] = {
532 .mask = &(const struct rte_flow_item_tcp){
534 .src_port = RTE_BE16(0xffff),
535 .dst_port = RTE_BE16(0xffff),
538 .default_mask = &rte_flow_item_tcp_mask,
539 .mask_sz = sizeof(struct rte_flow_item_tcp),
540 .validate = mlx4_flow_validate_tcp,
541 .convert = mlx4_flow_create_tcp,
542 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
547 * Make sure a flow rule is supported and initialize associated structure.
550 * Pointer to private structure.
552 * Flow rule attributes.
554 * Pattern specification (list terminated by the END pattern item).
556 * Associated actions (list terminated by the END action).
558 * Perform verbose error reporting if not NULL.
559 * @param[in, out] flow
560 * Flow structure to update.
563 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Validate attributes, pattern, and actions of a flow rule; when
 * flow->ibv_attr is non-NULL also convert each item into its Verbs spec
 * (second pass from mlx4_flow_create()). Pattern walking uses
 * mlx4_flow_proc_item_list as a graph: each item must appear in the
 * previous node's next_item list.
 * NOTE(review): many interior lines (returns, braces, some guards) are
 * elided in this excerpt; comments describe only visible code.
 */
566 mlx4_flow_prepare(struct priv *priv,
567 const struct rte_flow_attr *attr,
568 const struct rte_flow_item pattern[],
569 const struct rte_flow_action actions[],
570 struct rte_flow_error *error,
571 struct mlx4_flow *flow)
573 const struct rte_flow_item *item;
574 const struct rte_flow_action *action;
575 const struct mlx4_flow_proc_item *proc = mlx4_flow_proc_item_list;
576 struct mlx4_flow_target target = {
580 uint32_t priority_override = 0;
/* Attribute checks: no groups, priorities only in isolated mode,
 * ingress only. */
583 rte_flow_error_set(error, ENOTSUP,
584 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
586 "groups are not supported");
589 if (priv->isolated) {
590 priority_override = attr->priority;
591 } else if (attr->priority) {
592 rte_flow_error_set(error, ENOTSUP,
593 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
595 "priorities are not supported outside"
599 if (attr->priority > MLX4_FLOW_PRIORITY_LAST) {
600 rte_flow_error_set(error, ENOTSUP,
601 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
603 "maximum priority level is "
604 MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
608 rte_flow_error_set(error, ENOTSUP,
609 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
611 "egress is not supported");
614 if (!attr->ingress) {
615 rte_flow_error_set(error, ENOTSUP,
616 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
618 "only ingress is supported");
621 /* Go over pattern. */
622 for (item = pattern; item->type; ++item) {
623 const struct mlx4_flow_proc_item *next = NULL;
627 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
630 * The nic can support patterns with NULL eth spec only
631 * if eth is a single item in a rule.
633 if (!item->spec && item->type == RTE_FLOW_ITEM_TYPE_ETH) {
634 const struct rte_flow_item *next = item + 1;
637 rte_flow_error_set(error, ENOTSUP,
638 RTE_FLOW_ERROR_TYPE_ITEM,
641 " an Ethernet spec");
/* Only items listed in the previous node's next_item are legal here. */
645 for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
646 if (proc->next_item[i] == item->type) {
647 next = &mlx4_flow_proc_item_list[item->type];
652 goto exit_item_not_supported;
654 err = proc->validate(item, proc->mask, proc->mask_sz);
656 goto exit_item_not_supported;
657 if (flow->ibv_attr && proc->convert) {
658 err = proc->convert(item,
659 (proc->default_mask ?
664 goto exit_item_not_supported;
/* Always advance offset so the first (sizing) pass computes the total
 * ibv_attr allocation size. */
666 flow->offset += proc->dst_sz;
668 /* Use specified priority level when in isolated mode. */
669 if (priv->isolated && flow->ibv_attr)
670 flow->ibv_attr->priority = priority_override;
671 /* Go over actions list. */
672 for (action = actions; action->type; ++action) {
673 switch (action->type) {
674 const struct rte_flow_action_queue *queue;
676 case RTE_FLOW_ACTION_TYPE_VOID:
678 case RTE_FLOW_ACTION_TYPE_DROP:
681 case RTE_FLOW_ACTION_TYPE_QUEUE:
682 queue = action->conf;
683 if (queue->index >= priv->dev->data->nb_rx_queues)
684 goto exit_action_not_supported;
688 goto exit_action_not_supported;
/* Exactly one terminal action (QUEUE or DROP) is required. */
691 if (!target.queue && !target.drop) {
692 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
693 NULL, "no valid action");
697 exit_item_not_supported:
698 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
699 item, "item not supported");
701 exit_action_not_supported:
702 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
703 action, "action not supported");
708 * Validate a flow supported by the NIC.
710 * @see rte_flow_validate()
/*
 * rte_flow .validate callback: runs mlx4_flow_prepare() in validate-only
 * mode (flow.ibv_attr left NULL, so no conversion is performed).
 */
714 mlx4_flow_validate(struct rte_eth_dev *dev,
715 const struct rte_flow_attr *attr,
716 const struct rte_flow_item pattern[],
717 const struct rte_flow_action actions[],
718 struct rte_flow_error *error)
720 struct priv *priv = dev->data->dev_private;
721 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
723 return mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
727 * Destroy a drop queue.
730 * Pointer to private structure.
/*
 * Tear down the shared drop QP/CQ pair, if any. Clears the pointer before
 * destroying the Verbs objects. NOTE(review): the rte_free(fdq) and closing
 * braces appear elided in this excerpt.
 */
733 mlx4_flow_destroy_drop_queue(struct priv *priv)
735 if (priv->flow_drop_queue) {
736 struct rte_flow_drop *fdq = priv->flow_drop_queue;
738 priv->flow_drop_queue = NULL;
739 claim_zero(ibv_destroy_qp(fdq->qp));
740 claim_zero(ibv_destroy_cq(fdq->cq));
746 * Create a single drop queue for all drop flows.
749 * Pointer to private structure.
752 * 0 on success, negative value otherwise.
/*
 * Allocate the single shared drop queue (CQ + RAW_PACKET QP with no
 * receive capacity) that all DROP rules attach to.
 * NOTE(review): error-path labels, returns, and part of the qp_init_attr
 * are elided in this excerpt; the visible ibv_destroy_cq at the end is
 * presumably the QP-creation failure cleanup path — TODO confirm.
 */
755 mlx4_flow_create_drop_queue(struct priv *priv)
759 struct rte_flow_drop *fdq;
761 fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
763 ERROR("Cannot allocate memory for drop struct");
766 cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
768 ERROR("Cannot create drop CQ");
771 qp = ibv_create_qp(priv->pd,
772 &(struct ibv_qp_init_attr){
779 .qp_type = IBV_QPT_RAW_PACKET,
782 ERROR("Cannot create drop QP");
785 *fdq = (struct rte_flow_drop){
789 priv->flow_drop_queue = fdq;
792 claim_zero(ibv_destroy_cq(cq));
800 * Complete flow rule creation.
803 * Pointer to private structure.
805 * Verbs flow attributes.
807 * Rule target descriptor.
809 * Perform verbose error reporting if not NULL.
812 * A flow if the rule could be created.
/*
 * Finish rule creation: allocate the rte_flow handle, pick the target QP
 * (a real Rx queue's QP, or the shared drop QP), and install the rule via
 * ibv_create_flow() when the device is started.
 * NOTE(review): the drop/queue branch structure, dev-started check, and
 * error-path frees are elided in this excerpt.
 */
814 static struct rte_flow *
815 mlx4_flow_create_target_queue(struct priv *priv,
816 struct ibv_flow_attr *ibv_attr,
817 struct mlx4_flow_target *target,
818 struct rte_flow_error *error)
821 struct rte_flow *rte_flow;
825 rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
827 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
828 NULL, "cannot allocate flow memory");
/* Drop target: reuse the shared drop QP (may be NULL before start). */
832 qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
834 struct rxq *rxq = priv->dev->data->rx_queues[target->queue_id];
839 rte_flow->ibv_attr = ibv_attr;
842 rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
843 if (!rte_flow->ibv_flow) {
844 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
845 NULL, "flow rule creation failure");
857 * @see rte_flow_create()
/*
 * rte_flow .create callback. Two-pass design: first mlx4_flow_prepare()
 * validates and sizes (flow.offset accumulates dst_sz), then ibv_attr is
 * allocated and prepare runs again to fill in the Verbs specs; finally the
 * actions list is re-scanned to build the target and the rule is installed.
 * NOTE(review): returns, braces, and some error paths are elided in this
 * excerpt.
 */
860 static struct rte_flow *
861 mlx4_flow_create(struct rte_eth_dev *dev,
862 const struct rte_flow_attr *attr,
863 const struct rte_flow_item pattern[],
864 const struct rte_flow_action actions[],
865 struct rte_flow_error *error)
867 const struct rte_flow_action *action;
868 struct priv *priv = dev->data->dev_private;
869 struct rte_flow *rte_flow;
870 struct mlx4_flow_target target;
871 struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
/* Pass 1: validate + compute total attribute size in flow.offset. */
874 err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
877 flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
878 if (!flow.ibv_attr) {
879 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
880 NULL, "cannot allocate ibv_attr memory");
/* Reset offset so pass 2 writes specs right after the header. */
883 flow.offset = sizeof(struct ibv_flow_attr);
884 *flow.ibv_attr = (struct ibv_flow_attr){
886 .type = IBV_FLOW_ATTR_NORMAL,
887 .size = sizeof(struct ibv_flow_attr),
888 .priority = attr->priority,
/* Pass 2 cannot fail: pass 1 already validated the same arguments. */
893 claim_zero(mlx4_flow_prepare(priv, attr, pattern, actions,
895 target = (struct mlx4_flow_target){
899 for (action = actions; action->type; ++action) {
900 switch (action->type) {
901 const struct rte_flow_action_queue *queue;
903 case RTE_FLOW_ACTION_TYPE_VOID:
905 case RTE_FLOW_ACTION_TYPE_QUEUE:
906 queue = action->conf;
908 target.queue_id = queue->index;
910 case RTE_FLOW_ACTION_TYPE_DROP:
914 rte_flow_error_set(error, ENOTSUP,
915 RTE_FLOW_ERROR_TYPE_ACTION,
916 action, "unsupported action");
920 rte_flow = mlx4_flow_create_target_queue(priv, flow.ibv_attr,
923 LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
924 DEBUG("Flow created %p", (void *)rte_flow);
928 rte_free(flow.ibv_attr);
933 * Configure isolated mode.
935 * @see rte_flow_isolate()
/*
 * rte_flow .isolate callback: toggles isolated mode. Entering isolation
 * drops the device MAC flow; leaving re-adds it (failure there rolls the
 * mode change back via the error return).
 * NOTE(review): the `enable` parameter line, braces, and success return
 * are elided in this excerpt.
 */
939 mlx4_flow_isolate(struct rte_eth_dev *dev,
941 struct rte_flow_error *error)
943 struct priv *priv = dev->data->dev_private;
/* No-op when the requested state matches the current one. */
945 if (!!enable == !!priv->isolated)
947 priv->isolated = !!enable;
949 mlx4_mac_addr_del(priv);
950 } else if (mlx4_mac_addr_add(priv) < 0) {
952 return rte_flow_error_set(error, rte_errno,
953 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
954 NULL, "cannot leave isolated mode");
962 * @see rte_flow_destroy()
/*
 * rte_flow .destroy callback: unlink the rule, destroy its Verbs flow if
 * installed, and free the attribute buffer and handle.
 * NOTE(review): the guard around ibv_destroy_flow and the rte_free(flow)
 * of the handle itself appear elided in this excerpt.
 */
966 mlx4_flow_destroy(struct rte_eth_dev *dev,
967 struct rte_flow *flow,
968 struct rte_flow_error *error)
972 LIST_REMOVE(flow, next);
974 claim_zero(ibv_destroy_flow(flow->ibv_flow));
975 rte_free(flow->ibv_attr);
976 DEBUG("Flow destroyed %p", (void *)flow);
984 * @see rte_flow_flush()
/*
 * rte_flow .flush callback: destroy every rule on the private flow list,
 * always taking the current head since mlx4_flow_destroy() unlinks it.
 */
988 mlx4_flow_flush(struct rte_eth_dev *dev,
989 struct rte_flow_error *error)
991 struct priv *priv = dev->data->dev_private;
993 while (!LIST_EMPTY(&priv->flows)) {
994 struct rte_flow *flow;
996 flow = LIST_FIRST(&priv->flows);
997 mlx4_flow_destroy(dev, flow, error);
1005 * Called by dev_stop() to remove all flows.
1008 * Pointer to private structure.
/*
 * Remove all installed rules on dev_stop(): detach each Verbs flow but keep
 * the rte_flow handles and attributes so mlx4_flow_start() can re-apply
 * them, then drop the shared drop queue.
 * NOTE(review): the loop condition line and a guard on flow->ibv_flow
 * appear elided in this excerpt.
 */
1011 mlx4_flow_stop(struct priv *priv)
1013 struct rte_flow *flow;
1015 for (flow = LIST_FIRST(&priv->flows);
1017 flow = LIST_NEXT(flow, next)) {
1018 claim_zero(ibv_destroy_flow(flow->ibv_flow));
1019 flow->ibv_flow = NULL;
1020 DEBUG("Flow %p removed", (void *)flow);
1022 mlx4_flow_destroy_drop_queue(priv);
1029 * Pointer to private structure.
1032 * 0 on success, a errno value otherwise and rte_errno is set.
/*
 * Re-apply all remembered rules on dev_start(): recreate the shared drop
 * queue, then ibv_create_flow() each rule against its QP (falling back to
 * the drop QP when the rule has none).
 * NOTE(review): the loop condition, failure unwind (presumably calls
 * mlx4_flow_stop — TODO confirm), and returns are elided in this excerpt.
 */
1035 mlx4_flow_start(struct priv *priv)
1039 struct rte_flow *flow;
1041 ret = mlx4_flow_create_drop_queue(priv);
1044 for (flow = LIST_FIRST(&priv->flows);
1046 flow = LIST_NEXT(flow, next)) {
1047 qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
1048 flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
1049 if (!flow->ibv_flow) {
1050 DEBUG("Flow %p cannot be applied", (void *)flow);
1054 DEBUG("Flow %p applied", (void *)flow);
/* rte_flow operations table exported through mlx4_filter_ctrl(). */
1059 static const struct rte_flow_ops mlx4_flow_ops = {
1060 .validate = mlx4_flow_validate,
1061 .create = mlx4_flow_create,
1062 .destroy = mlx4_flow_destroy,
1063 .flush = mlx4_flow_flush,
1064 .isolate = mlx4_flow_isolate,
1068 * Manage filter operations.
1071 * Pointer to Ethernet device structure.
1072 * @param filter_type
1075 * Operation to perform.
1077 * Pointer to operation-specific structure.
1080 * 0 on success, negative errno value otherwise and rte_errno is set.
1083 mlx4_filter_ctrl(struct rte_eth_dev *dev,
1084 enum rte_filter_type filter_type,
1085 enum rte_filter_op filter_op,
1088 switch (filter_type) {
1089 case RTE_ETH_FILTER_GENERIC:
1090 if (filter_op != RTE_ETH_FILTER_GET)
1092 *(const void **)arg = &mlx4_flow_ops;
1095 ERROR("%p: filter type (%d) not supported",
1096 (void *)dev, filter_type);
1099 rte_errno = ENOTSUP;