/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data);

static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);

static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data);
struct rte_flow {
	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
	struct ibv_qp *qp; /**< Verbs queue pair. */
	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
	struct ibv_exp_wq *wq; /**< Verbs work queue. */
	struct ibv_cq *cq; /**< Verbs completion queue. */
	struct rxq *rxq; /**< Pointer to the queue, NULL if drop queue. */
};
/** Static initializer for items. */
#define ITEMS(...) \
	(const enum rte_flow_item_type []){ \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	}
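/*
 * Illustration only: ITEMS(RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_TCP)
 * expands to the compound literal
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_UDP,
 *           RTE_FLOW_ITEM_TYPE_TCP,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * i.e. an END-terminated array used below as the list of pattern items
 * allowed to follow a given item.
 */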
/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
	/** List of possible actions for these items. */
	const enum rte_flow_action_type *const actions;
	/** Bit-masks corresponding to the possibilities for the item. */
	const void *mask;
	/** Bit-masks size in bytes. */
	const unsigned int mask_sz;
	/**
	 * Conversion function from rte_flow to NIC specific flow.
	 *
	 * @param item
	 *   rte_flow item to convert.
	 * @param default_mask
	 *   Default bit-masks to use when item->mask is not provided.
	 * @param data
	 *   Internal structure to store the conversion.
	 *
	 * @return
	 *   0 on success, negative value otherwise.
	 */
	int (*convert)(const struct rte_flow_item *item,
		       const void *default_mask,
		       void *data);
	/** Size in bytes of the destination structure. */
	const unsigned int dst_sz;
	/** List of possible following items. */
	const enum rte_flow_item_type *const items;
};
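/*
 * The mlx5_flow_items[] array below indexes these descriptors by item type.
 * Validation starts from the RTE_FLOW_ITEM_TYPE_END entry and, for each item
 * of a pattern, only accepts the types listed in the current descriptor's
 * ->items edge list, which is how the ETH -> IPV4/IPV6 -> UDP/TCP layering
 * is enforced (see priv_flow_validate() below).
 */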
/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_END,
};
/** Graph of supported items and associated actions. */
static const struct mlx5_flow_items mlx5_flow_items[] = {
	[RTE_FLOW_ITEM_TYPE_END] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_IPV6),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		},
		.mask_sz = sizeof(struct rte_flow_item_eth),
		.convert = mlx5_flow_create_eth,
		.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv4){
			.hdr = {
				.src_addr = -1,
				.dst_addr = -1,
			},
		},
		.mask_sz = sizeof(struct rte_flow_item_ipv4),
		.convert = mlx5_flow_create_ipv4,
		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4),
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
			       RTE_FLOW_ITEM_TYPE_TCP),
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_ipv6){
			.hdr = {
				.src_addr = {
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
				},
				.dst_addr = {
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
					0xff, 0xff, 0xff, 0xff,
				},
			},
		},
		.mask_sz = sizeof(struct rte_flow_item_ipv6),
		.convert = mlx5_flow_create_ipv6,
		.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6),
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_udp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.mask_sz = sizeof(struct rte_flow_item_udp),
		.convert = mlx5_flow_create_udp,
		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.actions = valid_actions,
		.mask = &(const struct rte_flow_item_tcp){
			.hdr = {
				.src_port = -1,
				.dst_port = -1,
			},
		},
		.mask_sz = sizeof(struct rte_flow_item_tcp),
		.convert = mlx5_flow_create_tcp,
		.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
	},
};
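/*
 * With the graph above, a pattern such as ETH / IPV4 / UDP / END is accepted
 * (each item appears in its predecessor's ->items list), while for instance
 * ETH / UDP / END is rejected because UDP may not directly follow ETH.
 */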
/** Structure to pass to the conversion function. */
struct mlx5_flow {
	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
};
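/*
 * priv_flow_validate() is run twice on the same pattern: a first pass with
 * ibv_attr == NULL only accumulates ->offset (each item adds its dst_sz) to
 * size the Verbs attribute buffer, then a second pass invokes the convert()
 * callbacks, which append one ibv_exp_flow_spec_* after another at ->offset.
 */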
struct mlx5_flow_action {
	uint32_t queue:1; /**< Target is a receive queue. */
	uint32_t drop:1; /**< Target is a drop queue. */
	uint32_t queue_id; /**< Identifier of the queue. */
};
/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
			const uint8_t *mask, unsigned int size)
{
	int ret = 0;

	if (!item->spec && (item->mask || item->last))
		return -1;
	if (item->spec && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->spec;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->last && !item->mask) {
		unsigned int i;
		const uint8_t *spec = item->last;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->mask) {
		unsigned int i;
		const uint8_t *spec = item->mask;

		for (i = 0; i < size; ++i)
			if ((spec[i] | mask[i]) != mask[i])
				return -1;
	}
	if (item->spec && item->last) {
		uint8_t spec[size];
		uint8_t last[size];
		const uint8_t *apply = mask;
		unsigned int i;

		if (item->mask)
			apply = item->mask;
		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
			last[i] = ((const uint8_t *)item->last)[i] & apply[i];
		}
		ret = memcmp(spec, last, size);
	}
	return ret;
}
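/*
 * Note that the spec/last check above only compares the two masked bounds:
 * an item is accepted when spec and last are identical under the applied
 * mask, so true ranges (for instance TCP destination ports 80-90) are
 * reported as unsupported by this validation.
 */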
/**
 * Validate a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_validate(struct priv *priv,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error,
		   struct mlx5_flow *flow)
{
	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
	struct mlx5_flow_action action = {
		.queue = 0,
		.drop = 0,
	};

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   NULL,
				   "groups are not supported");
		return -rte_errno;
	}
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   NULL,
				   "priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   NULL,
				   "egress is not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   NULL,
				   "only ingress is supported");
		return -rte_errno;
	}
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
		const struct mlx5_flow_items *token = NULL;
		unsigned int i;
		int err;

		if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		for (i = 0;
		     cur_item->items &&
		     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
		     ++i) {
			if (cur_item->items[i] == items->type) {
				token = &mlx5_flow_items[items->type];
				break;
			}
		}
		if (!token)
			goto exit_item_not_supported;
		cur_item = token;
		err = mlx5_flow_item_validate(items,
					      (const uint8_t *)cur_item->mask,
					      cur_item->mask_sz);
		if (err)
			goto exit_item_not_supported;
		if (flow->ibv_attr && cur_item->convert) {
			err = cur_item->convert(items, cur_item->mask, flow);
			if (err)
				goto exit_item_not_supported;
		}
		flow->offset += cur_item->dst_sz;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			action.drop = 1;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *queue =
				(const struct rte_flow_action_queue *)
				actions->conf;

			if (!queue || (queue->index >= priv->rxqs_n))
				goto exit_action_not_supported;
			action.queue = 1;
		} else {
			goto exit_action_not_supported;
		}
	}
	if (!action.queue && !action.drop) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "no valid action");
		return -rte_errno;
	}
	return 0;
exit_item_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
			   items, "item not supported");
	return -rte_errno;
exit_action_not_supported:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
			   actions, "action not supported");
	return -rte_errno;
}
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	int ret;
	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };

	priv_lock(priv);
	ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
	priv_unlock(priv);
	return ret;
}
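/*
 * Typical application-side usage (illustrative sketch only; port_id and flow
 * are assumed local variables, not part of this PMD):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *             .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error error;
 *
 *   if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *
 * Both calls reach mlx5_flow_validate()/mlx5_flow_create() through
 * rte_flow_ops.
 */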
/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct mlx5_flow *flow = (struct mlx5_flow *)data;
	struct ibv_exp_flow_spec_eth *eth;
	const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
	unsigned int i;

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 2;
	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*eth = (struct ibv_exp_flow_spec_eth) {
		.type = IBV_EXP_FLOW_SPEC_ETH,
		.size = eth_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
	memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
	/* Remove unwanted bits from values. */
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
		eth->val.src_mac[i] &= eth->mask.src_mac[i];
	}
	return 0;
}
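/*
 * Illustration: with item->spec->dst = AA:BB:CC:DD:EE:FF and an item->mask
 * covering only the destination address, the resulting Verbs spec carries
 * val.dst_mac = AA:BB:CC:DD:EE:FF with mask.dst_mac = ff:ff:ff:ff:ff:ff,
 * while the source MAC value and mask stay zero, i.e. any source matches.
 */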
/**
 * Convert IPv4 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	struct mlx5_flow *flow = (struct mlx5_flow *)data;
	struct ibv_exp_flow_spec_ipv4 *ipv4;
	unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 1;
	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*ipv4 = (struct ibv_exp_flow_spec_ipv4) {
		.type = IBV_EXP_FLOW_SPEC_IPV4,
		.size = ipv4_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	ipv4->val = (struct ibv_exp_flow_ipv4_filter){
		.src_ip = spec->hdr.src_addr,
		.dst_ip = spec->hdr.dst_addr,
	};
	ipv4->mask = (struct ibv_exp_flow_ipv4_filter){
		.src_ip = mask->hdr.src_addr,
		.dst_ip = mask->hdr.dst_addr,
	};
	/* Remove unwanted bits from values. */
	ipv4->val.src_ip &= ipv4->mask.src_ip;
	ipv4->val.dst_ip &= ipv4->mask.dst_ip;
	return 0;
}
/**
 * Convert IPv6 item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
		      const void *default_mask,
		      void *data)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	struct mlx5_flow *flow = (struct mlx5_flow *)data;
	struct ibv_exp_flow_spec_ipv6 *ipv6;
	unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6);
	unsigned int i;

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 1;
	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*ipv6 = (struct ibv_exp_flow_spec_ipv6) {
		.type = IBV_EXP_FLOW_SPEC_IPV6,
		.size = ipv6_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
	       RTE_DIM(ipv6->val.src_ip));
	memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
	       RTE_DIM(ipv6->val.dst_ip));
	memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
	       RTE_DIM(ipv6->mask.src_ip));
	memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
	       RTE_DIM(ipv6->mask.dst_ip));
	/* Remove unwanted bits from values. */
	for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
		ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
		ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
	}
	return 0;
}
/**
 * Convert UDP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct mlx5_flow *flow = (struct mlx5_flow *)data;
	struct ibv_exp_flow_spec_tcp_udp *udp;
	unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
		.type = IBV_EXP_FLOW_SPEC_UDP,
		.size = udp_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	udp->val.dst_port = spec->hdr.dst_port;
	udp->val.src_port = spec->hdr.src_port;
	udp->mask.dst_port = mask->hdr.dst_port;
	udp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	udp->val.src_port &= udp->mask.src_port;
	udp->val.dst_port &= udp->mask.dst_port;
	return 0;
}
/**
 * Convert TCP item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
		     const void *default_mask,
		     void *data)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct mlx5_flow *flow = (struct mlx5_flow *)data;
	struct ibv_exp_flow_spec_tcp_udp *tcp;
	unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);

	++flow->ibv_attr->num_of_specs;
	flow->ibv_attr->priority = 0;
	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
		.type = IBV_EXP_FLOW_SPEC_TCP,
		.size = tcp_size,
	};
	if (!spec)
		return 0;
	if (!mask)
		mask = default_mask;
	tcp->val.dst_port = spec->hdr.dst_port;
	tcp->val.src_port = spec->hdr.src_port;
	tcp->mask.dst_port = mask->hdr.dst_port;
	tcp->mask.src_port = mask->hdr.src_port;
	/* Remove unwanted bits from values. */
	tcp->val.src_port &= tcp->mask.src_port;
	tcp->val.dst_port &= tcp->mask.dst_port;
	return 0;
}
/**
 * Complete flow rule creation.
 *
 * @param priv
 *   Pointer to private structure.
 * @param ibv_attr
 *   Verbs flow attributes.
 * @param action
 *   Target action structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow if the rule could be created.
 */
static struct rte_flow *
priv_flow_create_action_queue(struct priv *priv,
			      struct ibv_exp_flow_attr *ibv_attr,
			      struct mlx5_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rxq_ctrl *rxq;
	struct rte_flow *rte_flow;

	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
	if (!rte_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}
	if (action->drop) {
		rte_flow->cq =
			ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
					  &(struct ibv_exp_cq_init_attr){
						  .comp_mask = 0,
					  });
		if (!rte_flow->cq) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "cannot allocate CQ");
			goto error;
		}
		rte_flow->wq = ibv_exp_create_wq(priv->ctx,
						 &(struct ibv_exp_wq_init_attr){
						 .wq_type = IBV_EXP_WQT_RQ,
						 .max_recv_wr = 1,
						 .max_recv_sge = 1,
						 .pd = priv->pd,
						 .cq = rte_flow->cq,
						 });
	} else {
		rxq = container_of((*priv->rxqs)[action->queue_id],
				   struct rxq_ctrl, rxq);
		rte_flow->rxq = &rxq->rxq;
		rte_flow->wq = rxq->wq;
	}
	rte_flow->ibv_attr = ibv_attr;
	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
		priv->ctx,
		&(struct ibv_exp_rwq_ind_table_init_attr){
			.pd = priv->pd,
			.log_ind_tbl_size = 0,
			.ind_tbl = &rte_flow->wq,
			.comp_mask = 0,
		});
	if (!rte_flow->ind_table) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate indirection table");
		goto error;
	}
	rte_flow->qp = ibv_exp_create_qp(
		priv->ctx,
		&(struct ibv_exp_qp_init_attr){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_EXP_QP_INIT_ATTR_PD |
				IBV_EXP_QP_INIT_ATTR_PORT |
				IBV_EXP_QP_INIT_ATTR_RX_HASH,
			.pd = priv->pd,
			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
				.rx_hash_function =
					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = rss_hash_default_key_len,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
				.rwq_ind_tbl = rte_flow->ind_table,
			},
			.port_num = priv->port,
		});
	if (!rte_flow->qp) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate QP");
		goto error;
	}
	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
						 rte_flow->ibv_attr);
	if (!rte_flow->ibv_flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "flow rule creation failure");
		goto error;
	}
	return rte_flow;
error:
	if (rte_flow->qp)
		ibv_destroy_qp(rte_flow->qp);
	if (rte_flow->ind_table)
		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
	if (!rte_flow->rxq && rte_flow->wq)
		ibv_exp_destroy_wq(rte_flow->wq);
	if (!rte_flow->rxq && rte_flow->cq)
		ibv_destroy_cq(rte_flow->cq);
	rte_free(rte_flow->ibv_attr);
	rte_free(rte_flow);
	return NULL;
}
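/*
 * The error path above (and priv_flow_destroy() below) only releases the WQ
 * and CQ when rte_flow->rxq is NULL, i.e. for the drop queue created here;
 * queue-action flows borrow the WQ owned by the Rx queue and must not
 * destroy it.
 */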
/**
 * Convert a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise.
 */
static struct rte_flow *
priv_flow_create(struct priv *priv,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct rte_flow *rte_flow;
	struct mlx5_flow_action action;
	struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
	int err;

	err = priv_flow_validate(priv, attr, items, actions, error, &flow);
	if (err)
		return NULL;
	flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
	flow.offset = sizeof(struct ibv_exp_flow_attr);
	if (!flow.ibv_attr) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate ibv_attr memory");
		goto exit;
	}
	*flow.ibv_attr = (struct ibv_exp_flow_attr){
		.type = IBV_EXP_FLOW_ATTR_NORMAL,
		.size = sizeof(struct ibv_exp_flow_attr),
		.priority = attr->priority,
	};
	claim_zero(priv_flow_validate(priv, attr, items, actions,
				      error, &flow));
	action = (struct mlx5_flow_action){
		.queue = 0,
		.drop = 0,
	};
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
			continue;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			action.queue = 1;
			action.queue_id =
				((const struct rte_flow_action_queue *)
				 actions->conf)->index;
		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
			action.drop = 1;
		} else {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   actions, "unsupported action");
			goto exit;
		}
	}
	rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
						 &action, error);
	return rte_flow;
exit:
	rte_free(flow.ibv_attr);
	return NULL;
}
/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;

	priv_lock(priv);
	flow = priv_flow_create(priv, attr, items, actions, error);
	if (flow) {
		LIST_INSERT_HEAD(&priv->flows, flow, next);
		DEBUG("Flow created %p", (void *)flow);
	}
	priv_unlock(priv);
	return flow;
}
/**
 * Destroy a flow.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
priv_flow_destroy(struct priv *priv,
		  struct rte_flow *flow)
{
	(void)priv;
	LIST_REMOVE(flow, next);
	if (flow->ibv_flow)
		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
	if (flow->qp)
		claim_zero(ibv_destroy_qp(flow->qp));
	if (flow->ind_table)
		claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
	if (!flow->rxq && flow->wq)
		claim_zero(ibv_exp_destroy_wq(flow->wq));
	if (!flow->rxq && flow->cq)
		claim_zero(ibv_destroy_cq(flow->cq));
	rte_free(flow->ibv_attr);
	DEBUG("Flow destroyed %p", (void *)flow);
	rte_free(flow);
}
/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_destroy(priv, flow);
	priv_unlock(priv);
	return 0;
}
/**
 * Destroy all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_flow_flush(struct priv *priv)
{
	while (!LIST_EMPTY(&priv->flows)) {
		struct rte_flow *flow;

		flow = LIST_FIRST(&priv->flows);
		priv_flow_destroy(priv, flow);
	}
}
/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	(void)error;
	priv_lock(priv);
	priv_flow_flush(priv);
	priv_unlock(priv);
	return 0;
}
/**
 * Remove all flows.
 *
 * Called by dev_stop() to remove all flows.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_flow_stop(struct priv *priv)
{
	struct rte_flow *flow;

	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
		flow->ibv_flow = NULL;
		DEBUG("Flow %p removed", (void *)flow);
	}
}
/**
 * Add all flows.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, an errno value otherwise and rte_errno is set.
 */
int
priv_flow_start(struct priv *priv)
{
	struct rte_flow *flow;

	for (flow = LIST_FIRST(&priv->flows);
	     flow;
	     flow = LIST_NEXT(flow, next)) {
		flow->ibv_flow = ibv_exp_create_flow(flow->qp,
						     flow->ibv_attr);
		if (!flow->ibv_flow) {
			DEBUG("Flow %p cannot be applied", (void *)flow);
			rte_errno = EINVAL;
			return rte_errno;
		}
		DEBUG("Flow %p applied", (void *)flow);