/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }
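
/*
 * For reference, NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to the anonymous compound literal:
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * so every next_item list is implicitly END-terminated and can be iterated
 * without a separate length field.
 */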

/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
        /** Bit-masks corresponding to the possibilities for the item. */
        const uint8_t *mask;
        /**
         * Default bit-masks to use when item->mask is not provided. When
         * \default_mask is also NULL, the full supported bit-mask (\mask) is
         * used instead.
         */
        const void *default_mask;
        /** Bit-masks size in bytes. */
        const unsigned int mask_sz;
        /**
         * Check support for a given item.
         *
         * @param item[in]
         *   Item specification.
         * @param mask[in]
         *   Bit-masks covering supported fields to compare with spec, last
         *   and mask in \item.
         * @param size
         *   Bit-mask size in bytes.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*validate)(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size);
        /**
         * Conversion function from rte_flow to NIC specific flow.
         *
         * @param item[in]
         *   rte_flow item to convert.
         * @param default_mask[in]
         *   Default bit-masks to use when item->mask is not provided.
         * @param flow[in, out]
         *   Internal structure to store the conversion.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*convert)(const struct rte_flow_item *item,
                       const void *default_mask,
                       struct mlx4_flow *flow);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible subsequent items. */
        const enum rte_flow_item_type *const next_item;
};

/** Structure for Drop queue. */
struct rte_flow_drop {
        struct ibv_qp *qp; /**< Verbs queue pair. */
        struct ibv_cq *cq; /**< Verbs completion queue. */
};

/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param flow[in, out]
 *   Conversion result.
 */
static int
mlx4_flow_create_eth(const struct rte_flow_item *item,
                     const void *default_mask,
                     struct mlx4_flow *flow)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
        unsigned int i;

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 2;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *eth = (struct ibv_flow_spec_eth) {
                .type = IBV_FLOW_SPEC_ETH,
                .size = eth_size,
        };
        /* An empty Ethernet spec matches all traffic. */
        if (!spec) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
                return 0;
        }
        if (!mask)
                mask = default_mask;
        memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
        /* Remove unwanted bits from values. */
        for (i = 0; i < ETHER_ADDR_LEN; ++i) {
                eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
                eth->val.src_mac[i] &= eth->mask.src_mac[i];
        }
        return 0;
}
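
/*
 * Worked example for the masking above (hypothetical values): with
 * spec->dst 01:23:45:67:89:ab and mask->dst ff:ff:ff:00:00:00, the value
 * is stored as 01:23:45:00:00:00, so no value bit is ever set outside the
 * fields covered by the mask.
 */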

/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param flow[in, out]
 *   Conversion result.
 */
static int
mlx4_flow_create_vlan(const struct rte_flow_item *item,
                      const void *default_mask,
                      struct mlx4_flow *flow)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

        /* VLAN has no Verbs spec of its own; update the preceding one. */
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
        if (!spec)
                return 0;
        if (!mask)
                mask = default_mask;
        eth->val.vlan_tag = spec->tci;
        eth->mask.vlan_tag = mask->tci;
        eth->val.vlan_tag &= eth->mask.vlan_tag;
        return 0;
}

/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param flow[in, out]
 *   Conversion result.
 */
static int
mlx4_flow_create_ipv4(const struct rte_flow_item *item,
                      const void *default_mask,
                      struct mlx4_flow *flow)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct ibv_flow_spec_ipv4 *ipv4;
        unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 1;
        ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *ipv4 = (struct ibv_flow_spec_ipv4) {
                .type = IBV_FLOW_SPEC_IPV4,
                .size = ipv4_size,
        };
        if (!spec)
                return 0;
        ipv4->val = (struct ibv_flow_ipv4_filter) {
                .src_ip = spec->hdr.src_addr,
                .dst_ip = spec->hdr.dst_addr,
        };
        if (!mask)
                mask = default_mask;
        ipv4->mask = (struct ibv_flow_ipv4_filter) {
                .src_ip = mask->hdr.src_addr,
                .dst_ip = mask->hdr.dst_addr,
        };
        /* Remove unwanted bits from values. */
        ipv4->val.src_ip &= ipv4->mask.src_ip;
        ipv4->val.dst_ip &= ipv4->mask.dst_ip;
        return 0;
}

/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param flow[in, out]
 *   Conversion result.
 */
static int
mlx4_flow_create_udp(const struct rte_flow_item *item,
                     const void *default_mask,
                     struct mlx4_flow *flow)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct ibv_flow_spec_tcp_udp *udp;
        unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *udp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_UDP,
                .size = udp_size,
        };
        if (!spec)
                return 0;
        udp->val.dst_port = spec->hdr.dst_port;
        udp->val.src_port = spec->hdr.src_port;
        if (!mask)
                mask = default_mask;
        udp->mask.dst_port = mask->hdr.dst_port;
        udp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        udp->val.src_port &= udp->mask.src_port;
        udp->val.dst_port &= udp->mask.dst_port;
        return 0;
}

/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param flow[in, out]
 *   Conversion result.
 */
static int
mlx4_flow_create_tcp(const struct rte_flow_item *item,
                     const void *default_mask,
                     struct mlx4_flow *flow)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct ibv_flow_spec_tcp_udp *tcp;
        unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);

        ++flow->ibv_attr->num_of_specs;
        flow->ibv_attr->priority = 0;
        tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
        *tcp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_TCP,
                .size = tcp_size,
        };
        if (!spec)
                return 0;
        tcp->val.dst_port = spec->hdr.dst_port;
        tcp->val.src_port = spec->hdr.src_port;
        if (!mask)
                mask = default_mask;
        tcp->mask.dst_port = mask->hdr.dst_port;
        tcp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        tcp->val.src_port &= tcp->mask.src_port;
        tcp->val.dst_port &= tcp->mask.dst_port;
        return 0;
}
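
/*
 * Note on the hard-coded priorities used by the converters above: deeper
 * (more specific) layers get numerically lower values (Ethernet = 2,
 * IPv4 = 1, TCP/UDP = 0), so after conversion ibv_attr->priority reflects
 * the most specific layer present in the pattern. In isolated mode,
 * mlx4_flow_prepare() later overrides this with the user-specified
 * attr->priority.
 */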

/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and
 *   mask in \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_item_validate(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        int ret = 0;

        if (!item->spec && (item->mask || item->last))
                return -1;
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = mask;
                unsigned int i;

                if (item->mask)
                        apply = item->mask;
                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
        }
        return ret;
}
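
/*
 * Worked example for the spec/last comparison above (hypothetical values):
 * a UDP item with spec->hdr.dst_port = 1000, last->hdr.dst_port = 2000 and
 * a full 0xffff mask yields different masked spec and last buffers, so
 * memcmp() returns nonzero and the item is rejected. Only ranges that the
 * mask collapses to a single value are accepted.
 */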

static int
mlx4_flow_validate_eth(const struct rte_flow_item *item,
                       const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_eth *mask = item->mask;

                if (mask->dst.addr_bytes[0] != 0xff ||
                    mask->dst.addr_bytes[1] != 0xff ||
                    mask->dst.addr_bytes[2] != 0xff ||
                    mask->dst.addr_bytes[3] != 0xff ||
                    mask->dst.addr_bytes[4] != 0xff ||
                    mask->dst.addr_bytes[5] != 0xff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_vlan(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_vlan *mask = item->mask;

                if (mask->tci != 0 &&
                    ntohs(mask->tci) != 0x0fff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_ipv4 *mask = item->mask;

                if (mask->hdr.src_addr != 0 &&
                    mask->hdr.src_addr != 0xffffffff)
                        return -1;
                if (mask->hdr.dst_addr != 0 &&
                    mask->hdr.dst_addr != 0xffffffff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_udp(const struct rte_flow_item *item,
                       const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_udp *mask = item->mask;

                if (mask->hdr.src_port != 0 &&
                    mask->hdr.src_port != 0xffff)
                        return -1;
                if (mask->hdr.dst_port != 0 &&
                    mask->hdr.dst_port != 0xffff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

static int
mlx4_flow_validate_tcp(const struct rte_flow_item *item,
                       const uint8_t *mask, unsigned int size)
{
        if (item->mask) {
                const struct rte_flow_item_tcp *mask = item->mask;

                if (mask->hdr.src_port != 0 &&
                    mask->hdr.src_port != 0xffff)
                        return -1;
                if (mask->hdr.dst_port != 0 &&
                    mask->hdr.dst_port != 0xffff)
                        return -1;
        }
        return mlx4_flow_item_validate(item, mask, size);
}

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
                                       RTE_FLOW_ITEM_TYPE_IPV4),
                .mask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                },
                .default_mask = &rte_flow_item_eth_mask,
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .validate = mlx4_flow_validate_eth,
                .convert = mlx4_flow_create_eth,
                .dst_sz = sizeof(struct ibv_flow_spec_eth),
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
                .mask = &(const struct rte_flow_item_vlan){
                        /* Only TCI VID matching is supported. */
                        .tci = RTE_BE16(0x0fff),
                },
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .validate = mlx4_flow_validate_vlan,
                .convert = mlx4_flow_create_vlan,
                .dst_sz = 0,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
                                       RTE_FLOW_ITEM_TYPE_TCP),
                .mask = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = RTE_BE32(0xffffffff),
                                .dst_addr = RTE_BE32(0xffffffff),
                        },
                },
                .default_mask = &rte_flow_item_ipv4_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .validate = mlx4_flow_validate_ipv4,
                .convert = mlx4_flow_create_ipv4,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .mask = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .default_mask = &rte_flow_item_udp_mask,
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .validate = mlx4_flow_validate_udp,
                .convert = mlx4_flow_create_udp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .mask = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .default_mask = &rte_flow_item_tcp_mask,
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .validate = mlx4_flow_validate_tcp,
                .convert = mlx4_flow_create_tcp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
};
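
/*
 * Example pattern accepted by the graph above, with hypothetical spec
 * pointers (any of them may be NULL except for the lone-Ethernet
 * restriction enforced in mlx4_flow_prepare()):
 *
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * ETH may be followed by VLAN or IPV4 and IPV4 by UDP or TCP; UDP and TCP
 * have no next_item list, so they must come last.
 */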

/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct mlx4_flow *flow)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *action;
        const struct mlx4_flow_proc_item *proc = mlx4_flow_proc_item_list;
        struct mlx4_flow_target target = {
                .queue = 0,
                .drop = 0,
        };
        uint32_t priority_override = 0;

        if (attr->group)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                         NULL, "groups are not supported");
        if (priv->isolated)
                priority_override = attr->priority;
        else if (attr->priority)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                         NULL,
                         "priorities are not supported outside isolated mode");
        if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                         NULL, "maximum priority level is "
                         MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
        if (attr->egress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                         NULL, "egress is not supported");
        if (!attr->ingress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                         NULL, "only ingress is supported");
        /* Go over pattern. */
        for (item = pattern; item->type; ++item) {
                const struct mlx4_flow_proc_item *next = NULL;
                unsigned int i;
                int err;

                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                /*
                 * The NIC can support patterns with NULL Ethernet spec only
                 * if Ethernet is the only item in the rule.
                 */
                if (!item->spec && item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                        const struct rte_flow_item *next = item + 1;

                        if (next->type)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
                                         "the rule requires an Ethernet spec");
                }
                for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
                        if (proc->next_item[i] == item->type) {
                                next = &mlx4_flow_proc_item_list[item->type];
                                break;
                        }
                }
                if (!next)
                        goto exit_item_not_supported;
                proc = next;
                err = proc->validate(item, proc->mask, proc->mask_sz);
                if (err)
                        goto exit_item_not_supported;
                if (flow->ibv_attr && proc->convert) {
                        err = proc->convert(item,
                                            (proc->default_mask ?
                                             proc->default_mask :
                                             proc->mask),
                                            flow);
                        if (err)
                                goto exit_item_not_supported;
                }
                flow->offset += proc->dst_sz;
        }
        /* Use specified priority level when in isolated mode. */
        if (priv->isolated && flow->ibv_attr)
                flow->ibv_attr->priority = priority_override;
        /* Go over actions list. */
        for (action = actions; action->type; ++action) {
                switch (action->type) {
                        const struct rte_flow_action_queue *queue;

                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        target.drop = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        queue = action->conf;
                        if (queue->index >= priv->dev->data->nb_rx_queues)
                                goto exit_action_not_supported;
                        target.queue = 1;
                        break;
                default:
                        goto exit_action_not_supported;
                }
        }
        if (!target.queue && !target.drop)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                         NULL, "no valid action");
        return 0;
exit_item_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, "item not supported");
exit_action_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                  action, "action not supported");
}
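
/*
 * mlx4_flow_prepare() is designed for two passes; a minimal sketch of the
 * calling convention used by mlx4_flow_validate() and mlx4_flow_create():
 *
 *   struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
 *
 *   // First pass: flow.ibv_attr is NULL, nothing is converted, but
 *   // flow.offset accumulates the size of every Verbs spec needed.
 *   if (mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow))
 *           return NULL;
 *   // Second pass: allocate flow.offset bytes for flow.ibv_attr, reset
 *   // flow.offset past the header and call prepare again to fill it in.
 */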

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };

        return mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
}

/**
 * Destroy a drop queue.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_flow_destroy_drop_queue(struct priv *priv)
{
        if (priv->flow_drop_queue) {
                struct rte_flow_drop *fdq = priv->flow_drop_queue;

                priv->flow_drop_queue = NULL;
                claim_zero(ibv_destroy_qp(fdq->qp));
                claim_zero(ibv_destroy_cq(fdq->cq));
                rte_free(fdq);
        }
}

/**
 * Create a single drop queue for all drop flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx4_flow_create_drop_queue(struct priv *priv)
{
        struct ibv_qp *qp;
        struct ibv_cq *cq;
        struct rte_flow_drop *fdq;

        fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
        if (!fdq) {
                ERROR("Cannot allocate memory for drop struct");
                goto err;
        }
        cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!cq) {
                ERROR("Cannot create drop CQ");
                goto err_create_cq;
        }
        qp = ibv_create_qp(priv->pd,
                           &(struct ibv_qp_init_attr){
                                .send_cq = cq,
                                .recv_cq = cq,
                                .cap = {
                                        .max_recv_wr = 1,
                                        .max_recv_sge = 1,
                                },
                                .qp_type = IBV_QPT_RAW_PACKET,
                           });
        if (!qp) {
                ERROR("Cannot create drop QP");
                goto err_create_qp;
        }
        *fdq = (struct rte_flow_drop){
                .qp = qp,
                .cq = cq,
        };
        priv->flow_drop_queue = fdq;
        return 0;
err_create_qp:
        claim_zero(ibv_destroy_cq(cq));
err_create_cq:
        rte_free(fdq);
err:
        return -1;
}
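
/*
 * A single CQ/QP pair backs every drop rule. The QP is created with minimal
 * capabilities and no receive buffers are ever posted to it, so packets
 * steered there are discarded; this is presumably cheaper than one set of
 * Verbs objects per drop rule.
 */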

/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param target
 *   Rule target descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
mlx4_flow_create_target_queue(struct priv *priv,
                              struct ibv_flow_attr *ibv_attr,
                              struct mlx4_flow_target *target,
                              struct rte_flow_error *error)
{
        struct ibv_qp *qp;
        struct rte_flow *rte_flow;

        assert(priv->pd);
        assert(priv->ctx);
        rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
        if (!rte_flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }
        if (target->drop) {
                qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
        } else {
                struct rxq *rxq = priv->dev->data->rx_queues[target->queue_id];

                qp = rxq->qp;
                rte_flow->qp = qp;
        }
        rte_flow->ibv_attr = ibv_attr;
        if (!priv->started)
                return rte_flow;
        rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
        if (!rte_flow->ibv_flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "flow rule creation failure");
                goto error;
        }
        return rte_flow;
error:
        rte_free(rte_flow);
        return NULL;
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        const struct rte_flow_action *action;
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *rte_flow;
        struct mlx4_flow_target target;
        struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
        int err;

        err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
        if (err)
                return NULL;
        flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
        if (!flow.ibv_attr) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot allocate ibv_attr memory");
                return NULL;
        }
        flow.offset = sizeof(struct ibv_flow_attr);
        *flow.ibv_attr = (struct ibv_flow_attr){
                .comp_mask = 0,
                .type = IBV_FLOW_ATTR_NORMAL,
                .size = sizeof(struct ibv_flow_attr),
                .priority = attr->priority,
                .num_of_specs = 0,
                .port = priv->port,
                .flags = 0,
        };
        claim_zero(mlx4_flow_prepare(priv, attr, pattern, actions,
                                     error, &flow));
        target = (struct mlx4_flow_target){
                .queue = 0,
                .drop = 0,
        };
        for (action = actions; action->type; ++action) {
                switch (action->type) {
                        const struct rte_flow_action_queue *queue;

                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        queue = action->conf;
                        target.queue = 1;
                        target.queue_id = queue->index;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        target.drop = 1;
                        break;
                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           action, "unsupported action");
                        goto exit;
                }
        }
        rte_flow = mlx4_flow_create_target_queue(priv, flow.ibv_attr,
                                                 &target, error);
        if (rte_flow) {
                LIST_INSERT_HEAD(&priv->flows, rte_flow, next);
                DEBUG("Flow created %p", (void *)rte_flow);
                return rte_flow;
        }
exit:
        rte_free(flow.ibv_attr);
        return NULL;
}
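
/*
 * For context, applications reach the function above through the generic
 * flow API; a hypothetical QUEUE rule targeting Rx queue 0 could look like:
 *
 *   struct rte_flow_error err;
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_action actions[] = {
 *           {
 *                   .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *                   .conf = &(struct rte_flow_action_queue){ .index = 0 },
 *           },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow *f =
 *           rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * with pattern an END-terminated item array as illustrated after
 * mlx4_flow_proc_item_list above.
 */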

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        if (!!enable == !!priv->isolated)
                return 0;
        priv->isolated = !!enable;
        if (enable) {
                mlx4_mac_addr_del(priv);
        } else if (mlx4_mac_addr_add(priv) < 0) {
                priv->isolated = 1;
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot leave isolated mode");
        }
        return 0;
}

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        (void)dev;
        (void)error;
        LIST_REMOVE(flow, next);
        if (flow->ibv_flow)
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
        rte_free(flow->ibv_attr);
        DEBUG("Flow destroyed %p", (void *)flow);
        rte_free(flow);
        return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        while (!LIST_EMPTY(&priv->flows)) {
                struct rte_flow *flow;

                flow = LIST_FIRST(&priv->flows);
                mlx4_flow_destroy(dev, flow, error);
        }
        return 0;
}

/**
 * Remove all flows.
 *
 * Called by dev_stop() to remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_stop(struct priv *priv)
{
        struct rte_flow *flow;

        for (flow = LIST_FIRST(&priv->flows);
             flow;
             flow = LIST_NEXT(flow, next)) {
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
                flow->ibv_flow = NULL;
                DEBUG("Flow %p removed", (void *)flow);
        }
        mlx4_flow_destroy_drop_queue(priv);
}

/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_start(struct priv *priv)
{
        int ret;
        struct ibv_qp *qp;
        struct rte_flow *flow;

        ret = mlx4_flow_create_drop_queue(priv);
        if (ret)
                return -1;
        for (flow = LIST_FIRST(&priv->flows);
             flow;
             flow = LIST_NEXT(flow, next)) {
                qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
                flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
                if (!flow->ibv_flow) {
                        DEBUG("Flow %p cannot be applied", (void *)flow);
                        rte_errno = EINVAL;
                        return rte_errno;
                }
                DEBUG("Flow %p applied", (void *)flow);
        }
        return 0;
}
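
/*
 * Together, mlx4_flow_stop() and mlx4_flow_start() let dev_stop() and
 * dev_start() toggle rules without losing them: stop destroys the Verbs
 * flow objects but keeps each rte_flow and its ibv_attr on priv->flows,
 * while start re-creates every ibv_flow from the retained attributes,
 * falling back to the drop QP when a rule targets no Rx queue.
 */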

static const struct rte_flow_ops mlx4_flow_ops = {
        .validate = mlx4_flow_validate,
        .create = mlx4_flow_create,
        .destroy = mlx4_flow_destroy,
        .flush = mlx4_flow_flush,
        .isolate = mlx4_flow_isolate,
};

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
                 enum rte_filter_type filter_type,
                 enum rte_filter_op filter_op,
                 void *arg)
{
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        break;
                *(const void **)arg = &mlx4_flow_ops;
                return 0;
        default:
                ERROR("%p: filter type (%d) not supported",
                      (void *)dev, filter_type);
                break;
        }
        rte_errno = ENOTSUP;
        return -rte_errno;
}
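
/*
 * The rte_flow layer reaches mlx4_flow_ops through the hook above;
 * simplified, rte_flow_validate() and friends perform the equivalent of:
 *
 *   const struct rte_flow_ops *ops;
 *
 *   dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *                             RTE_ETH_FILTER_GET, &ops);
 *   ops->validate(dev, attr, pattern, actions, error);
 */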