4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
43 #pragma GCC diagnostic ignored "-Wpedantic"
45 #include <infiniband/verbs.h>
47 #pragma GCC diagnostic error "-Wpedantic"
50 /* DPDK headers don't like -pedantic. */
52 #pragma GCC diagnostic ignored "-Wpedantic"
54 #include <rte_ether.h>
55 #include <rte_malloc.h>
56 #include <rte_ethdev.h>
57 #include <rte_common.h>
59 #include <rte_flow_driver.h>
61 #pragma GCC diagnostic error "-Wpedantic"
65 #include "mlx5_rxtx.h"
/*
 * Internal representation of a flow director rule's match fields,
 * derived from struct rte_eth_fdir_filter by fdir_filter_to_flow_desc().
 * NOTE(review): additional members (mac, vlan_tag, src/dst IP and port
 * arrays) are referenced elsewhere in this file but not visible here.
 */
67 struct fdir_flow_desc {
74 enum hash_rxq_type type; /* Hash RX queue type selecting the verbs flow layout. */
/* One flow director filter as tracked in the per-port filter list. */
77 struct mlx5_fdir_filter {
78 LIST_ENTRY(mlx5_fdir_filter) next; /* Linkage in priv->fdir_filter_list. */
79 uint16_t queue; /* Queue assigned to if FDIR match. */
80 enum rte_eth_fdir_behavior behavior; /* Accept or reject (drop) on match. */
81 struct fdir_flow_desc desc; /* Match fields for this filter. */
82 struct ibv_exp_flow *flow; /* Verbs flow handle; NULL while disabled. */
/* Head type for the per-port list of flow director filters. */
85 LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
88 * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
90 * @param[in] fdir_filter
91 * DPDK filter structure to convert.
93 * Resulting mlx5 filter descriptor.
/*
 * Convert a DPDK flow director filter into the driver's internal
 * fdir_flow_desc: zero the descriptor, record VLAN TCI, the destination
 * MAC (MAC/VLAN perfect mode only), then derive the hash RX queue type
 * and copy the L3/L4 match fields from the flow type.
 */
98 fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
99 struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
101 /* Initialize descriptor. */
102 memset(desc, 0, sizeof(*desc))
105 desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
107 /* Set MAC address. */
108 if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
109 rte_memcpy(desc->mac,
110 fdir_filter->input.flow.mac_vlan_flow.mac_addr.
/* MAC/VLAN mode matches on Ethernet only; no L3/L4 fields apply. */
113 desc->type = HASH_RXQ_ETH;
/* Map the DPDK flow type to the corresponding hash RX queue type. */
118 switch (fdir_filter->input.flow_type) {
119 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
120 desc->type = HASH_RXQ_UDPV4;
122 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
123 desc->type = HASH_RXQ_TCPV4;
125 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
126 desc->type = HASH_RXQ_IPV4;
128 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
129 desc->type = HASH_RXQ_UDPV6;
131 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
132 desc->type = HASH_RXQ_TCPV6;
134 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
135 desc->type = HASH_RXQ_IPV6;
141 /* Set flow values */
/* UDP and TCP cases share the port copy; the union layout makes
 * udp4_flow/udp6_flow port offsets valid for the TCP variants too. */
142 switch (fdir_filter->input.flow_type) {
143 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
144 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
145 desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
146 desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
/* IPv4 addresses occupy only the first word of the IP arrays. */
148 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
149 desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
150 desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
152 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
153 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
154 desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
155 desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
157 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
158 rte_memcpy(desc->src_ip,
159 fdir_filter->input.flow.ipv6_flow.src_ip,
160 sizeof(desc->src_ip));
161 rte_memcpy(desc->dst_ip,
162 fdir_filter->input.flow.ipv6_flow.dst_ip,
163 sizeof(desc->dst_ip));
171 * Check if two flow descriptors overlap according to configured mask.
174 * Private structure that provides flow director mask.
176 * First flow descriptor to compare.
178 * Second flow descriptor to compare.
181 * Nonzero if descriptors overlap.
/*
 * Decide whether two flow descriptors would match the same packets once
 * the device-wide flow director masks are applied. Descriptors of
 * different hash RX queue types never overlap; otherwise MAC, ports and
 * (per address family) IP addresses are compared after masking.
 */
184 priv_fdir_overlap(const struct priv *priv,
185 const struct fdir_flow_desc *desc1,
186 const struct fdir_flow_desc *desc2)
/* Single set of masks configured for the whole port. */
188 const struct rte_eth_fdir_masks *mask =
189 &priv->dev->data->dev_conf.fdir_conf.mask;
192 if (desc1->type != desc2->type)
194 /* Ignore non masked bits. */
195 for (i = 0; i != RTE_DIM(desc1->mac); ++i)
196 if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
197 (desc2->mac[i] & mask->mac_addr_byte_mask))
199 if (((desc1->src_port & mask->src_port_mask) !=
200 (desc2->src_port & mask->src_port_mask)) ||
201 ((desc1->dst_port & mask->dst_port_mask) !=
202 (desc2->dst_port & mask->dst_port_mask)))
/* IP comparison depends on the address family of the descriptor. */
204 switch (desc1->type) {
/* IPv4: only the first word of the IP arrays is significant. */
208 if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
209 (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
210 ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
211 (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
/* IPv6: compare all four 32-bit words under their per-word masks. */
217 for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
218 if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
219 (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
220 ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
221 (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
231 * Create flow director steering rule for a specific filter.
235 * @param mlx5_fdir_filter
236 * Filter to create a steering rule for.
238 * Flow director queue for matching packets.
241 * 0 on success, errno value on failure.
/*
 * Build an ibv_exp flow attribute/spec chain (ETH, then IPv4 or IPv6,
 * then optionally TCP/UDP) from the filter's descriptor and install it
 * on fdir_queue->qp. On success the verbs handle is stored in
 * mlx5_fdir_filter->flow. Refuses to install a rule that overlaps an
 * already-active one.
 */
244 priv_fdir_flow_add(struct priv *priv,
245 struct mlx5_fdir_filter *mlx5_fdir_filter,
246 struct fdir_queue *fdir_queue)
248 struct ibv_exp_flow *flow;
249 struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
250 enum rte_fdir_mode fdir_mode =
251 priv->dev->data->dev_conf.fdir_conf.mode;
252 struct rte_eth_fdir_masks *mask =
253 &priv->dev->data->dev_conf.fdir_conf.mask;
/* Stack-allocated attr+spec buffer sized by priv_flow_attr() probe. */
254 FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
255 struct ibv_exp_flow_attr *attr = &data->attr;
/* Cursor walking the specs that immediately follow the attribute. */
256 uintptr_t spec_offset = (uintptr_t)&data->spec;
257 struct ibv_exp_flow_spec_eth *spec_eth;
258 struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
259 struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
260 struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
261 struct mlx5_fdir_filter *iter_fdir_filter;
264 /* Abort if an existing flow overlaps this one to avoid packet
265 * duplication, even if it targets another queue. */
266 LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
267 if ((iter_fdir_filter != mlx5_fdir_filter) &&
268 (iter_fdir_filter->flow != NULL) &&
269 (priv_fdir_overlap(priv,
270 &mlx5_fdir_filter->desc,
271 &iter_fdir_filter->desc)))
275 * No padding must be inserted by the compiler between attr and spec.
276 * This layout is expected by libibverbs.
278 assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);
/* Second call actually fills in the attribute and default specs. */
279 priv_flow_attr(priv, attr, sizeof(data), desc->type);
281 /* Set Ethernet spec */
282 spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;
284 /* The first specification must be Ethernet. */
285 assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
286 assert(spec_eth->size == sizeof(*spec_eth));
/* Apply the configured VLAN mask to both value and mask fields. */
289 spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
290 spec_eth->mask.vlan_tag = mask->vlan_tci_mask;
292 /* Update priority */
/* MAC/VLAN perfect mode additionally matches the destination MAC. */
295 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
297 for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
298 spec_eth->val.dst_mac[i] =
299 desc->mac[i] & mask->mac_addr_byte_mask;
300 spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
/* Append the L3 spec matching the descriptor's queue type. */
305 switch (desc->type) {
309 spec_offset += spec_eth->size;
312 spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
314 /* The second specification must be IP. */
315 assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
316 assert(spec_ipv4->size == sizeof(*spec_ipv4));
318 spec_ipv4->val.src_ip =
319 desc->src_ip[0] & mask->ipv4_mask.src_ip;
320 spec_ipv4->val.dst_ip =
321 desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
322 spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
323 spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
325 /* Update priority */
/* Plain IPv4 rules stop at L3; no TCP/UDP spec follows. */
328 if (desc->type == HASH_RXQ_IPV4)
331 spec_offset += spec_ipv4->size;
336 spec_offset += spec_eth->size;
339 spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
341 /* The second specification must be IP. */
342 assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
343 assert(spec_ipv6->size == sizeof(*spec_ipv6));
/* Copy the four masked 32-bit words of each IPv6 address. */
345 for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
346 ((uint32_t *)spec_ipv6->val.src_ip)[i] =
347 desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
348 ((uint32_t *)spec_ipv6->val.dst_ip)[i] =
349 desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
351 rte_memcpy(spec_ipv6->mask.src_ip,
352 mask->ipv6_mask.src_ip,
353 sizeof(spec_ipv6->mask.src_ip));
354 rte_memcpy(spec_ipv6->mask.dst_ip,
355 mask->ipv6_mask.dst_ip,
356 sizeof(spec_ipv6->mask.dst_ip));
358 /* Update priority */
/* Plain IPv6 rules stop at L3; no TCP/UDP spec follows. */
361 if (desc->type == HASH_RXQ_IPV6)
364 spec_offset += spec_ipv6->size;
367 ERROR("invalid flow attribute type");
371 /* Set TCP/UDP flow specification. */
372 spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;
374 /* The third specification must be TCP/UDP. */
375 assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
376 spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
377 assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));
379 spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
380 spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
381 spec_tcp_udp->mask.src_port = mask->src_port_mask;
382 spec_tcp_udp->mask.dst_port = mask->dst_port_mask;
384 /* Update priority */
/* Hand the assembled attribute chain to the kernel/firmware. */
390 flow = ibv_exp_create_flow(fdir_queue->qp, attr);
392 /* It's not clear whether errno is always set in this case. */
393 ERROR("%p: flow director configuration failed, errno=%d: %s",
395 (errno ? strerror(errno) : "Unknown error"));
401 DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
402 mlx5_fdir_filter->flow = flow;
407 * Destroy a flow director queue.
410 * Flow director queue to be destroyed.
/*
 * Tear down a flow director queue: first destroy every still-active
 * filter flow that targets it, then release its verbs resources
 * (QP, indirection table, WQ, CQ) in reverse creation order and free
 * the structure. The 0x2a poison fill helps catch use-after-free.
 */
413 priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
415 struct mlx5_fdir_filter *fdir_filter;
417 /* Disable filter flows still applying to this queue. */
418 LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
419 unsigned int idx = fdir_filter->queue;
420 struct rxq_ctrl *rxq_ctrl =
421 container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
423 assert(idx < priv->rxqs_n);
424 if (fdir_queue == rxq_ctrl->fdir_queue &&
425 fdir_filter->flow != NULL) {
426 claim_zero(ibv_exp_destroy_flow(fdir_filter->flow));
/* Mark disabled so priv_fdir_filter_enable() can re-create it. */
427 fdir_filter->flow = NULL;
430 assert(fdir_queue->qp);
431 claim_zero(ibv_destroy_qp(fdir_queue->qp));
432 assert(fdir_queue->ind_table);
433 claim_zero(ibv_exp_destroy_rwq_ind_table(fdir_queue->ind_table));
435 claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
437 claim_zero(ibv_destroy_cq(fdir_queue->cq));
/* Poison before freeing to surface stale references in debug runs. */
439 memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
441 rte_free(fdir_queue);
445 * Create a flow director queue.
450 * Work queue to route matched packets to, NULL if one needs to
454 * Related flow director queue on success, NULL otherwise.
/*
 * Allocate a flow director queue and its verbs resources: a minimal CQ
 * and WQ (when no external WQ is supplied), a single-entry RWQ
 * indirection table over that WQ, and a RAW_PACKET hash RX QP pointing
 * at the table. On any failure, everything created so far is destroyed
 * in reverse order and NULL is returned.
 */
456 static struct fdir_queue *
457 priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,
460 struct fdir_queue *fdir_queue;
462 fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
465 ERROR("cannot allocate flow director queue");
/* CQ/WQ are only created when the caller did not provide a WQ
 * (drop queue case); otherwise the RX queue's WQ is reused. */
471 fdir_queue->cq = ibv_exp_create_cq(
472 priv->ctx, 1, NULL, NULL, 0,
473 &(struct ibv_exp_cq_init_attr){
476 if (!fdir_queue->cq) {
477 ERROR("cannot create flow director CQ");
480 fdir_queue->wq = ibv_exp_create_wq(
482 &(struct ibv_exp_wq_init_attr){
483 .wq_type = IBV_EXP_WQT_RQ,
487 .cq = fdir_queue->cq,
489 if (!fdir_queue->wq) {
490 ERROR("cannot create flow director WQ");
/* Single-entry indirection table: all matched packets go to one WQ. */
495 fdir_queue->ind_table = ibv_exp_create_rwq_ind_table(
497 &(struct ibv_exp_rwq_ind_table_init_attr){
499 .log_ind_tbl_size = 0,
503 if (!fdir_queue->ind_table) {
504 ERROR("cannot create flow director indirection table");
507 fdir_queue->qp = ibv_exp_create_qp(
509 &(struct ibv_exp_qp_init_attr){
510 .qp_type = IBV_QPT_RAW_PACKET,
512 IBV_EXP_QP_INIT_ATTR_PD |
513 IBV_EXP_QP_INIT_ATTR_PORT |
514 IBV_EXP_QP_INIT_ATTR_RX_HASH,
516 .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
518 IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
519 .rx_hash_key_len = rss_hash_default_key_len,
520 .rx_hash_key = rss_hash_default_key,
/* Zero hash fields: no RSS spreading, the table has one entry. */
521 .rx_hash_fields_mask = 0,
522 .rwq_ind_tbl = fdir_queue->ind_table,
524 .port_num = priv->port,
526 if (!fdir_queue->qp) {
527 ERROR("cannot create flow director hash RX QP");
/* Error path: unwind in reverse creation order. */
533 assert(!fdir_queue->qp);
534 if (fdir_queue->ind_table)
535 claim_zero(ibv_exp_destroy_rwq_ind_table
536 (fdir_queue->ind_table));
538 claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
540 claim_zero(ibv_destroy_cq(fdir_queue->cq));
541 rte_free(fdir_queue);
546 * Get flow director queue for a specific RX queue, create it in case
555 * Related flow director queue on success, NULL otherwise.
/*
 * Return the flow director queue attached to RX queue `idx`, creating
 * it lazily (on the RX queue's own WQ) and caching it in the rxq_ctrl
 * on first use. Returns NULL if creation fails.
 */
557 static struct fdir_queue *
558 priv_get_fdir_queue(struct priv *priv, uint16_t idx)
560 struct rxq_ctrl *rxq_ctrl =
561 container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
562 struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;
564 assert(rxq_ctrl->wq);
565 if (fdir_queue == NULL) {
/* Reuse the RX queue's WQ so matched packets land on that queue. */
566 fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
568 rxq_ctrl->fdir_queue = fdir_queue;
574 * Get flow director drop queue. Create it if it does not exist.
580 * Flow director drop queue on success, NULL otherwise.
/*
 * Return the port's drop queue (used by RTE_ETH_FDIR_REJECT filters),
 * creating it lazily with no backing WQ so matched packets are
 * discarded. Cached in priv->fdir_drop_queue.
 */
582 static struct fdir_queue *
583 priv_get_fdir_drop_queue(struct priv *priv)
585 struct fdir_queue *fdir_queue = priv->fdir_drop_queue;
587 if (fdir_queue == NULL) {
588 unsigned int socket = SOCKET_ID_ANY;
590 /* Select a known NUMA socket if possible. */
591 if (priv->rxqs_n && (*priv->rxqs)[0])
592 socket = container_of((*priv->rxqs)[0],
593 struct rxq_ctrl, rxq)->socket;
/* NULL WQ => priv_fdir_queue_create() builds its own CQ/WQ. */
594 fdir_queue = priv_fdir_queue_create(priv, NULL, socket);
595 priv->fdir_drop_queue = fdir_queue;
601 * Enable flow director filter and create steering rules.
605 * @param mlx5_fdir_filter
606 * Filter to create steering rule for.
609 * 0 on success, errno value on failure.
/*
 * Activate a filter: pick the target fdir queue (the drop queue for
 * REJECT filters, otherwise the queue matching filter->queue) and
 * install the steering rule via priv_fdir_flow_add(). A filter whose
 * flow handle already exists is left untouched.
 */
612 priv_fdir_filter_enable(struct priv *priv,
613 struct mlx5_fdir_filter *mlx5_fdir_filter)
615 struct fdir_queue *fdir_queue;
617 /* Check if flow already exists. */
618 if (mlx5_fdir_filter->flow != NULL)
621 /* Get fdir_queue for specific queue. */
622 if (mlx5_fdir_filter->behavior == RTE_ETH_FDIR_REJECT)
623 fdir_queue = priv_get_fdir_drop_queue(priv);
625 fdir_queue = priv_get_fdir_queue(priv,
626 mlx5_fdir_filter->queue);
628 if (fdir_queue == NULL) {
629 ERROR("failed to create flow director rxq for queue %d",
630 mlx5_fdir_filter->queue);
/* Create flow */
635 return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
639 * Initialize flow director filters list.
645 * 0 on success, errno value on failure.
/*
 * Allocate and initialize priv->fdir_filter_list. Idempotent: returns
 * early if the list already exists.
 */
648 fdir_init_filters_list(struct priv *priv)
650 /* Filter list initialization should be done only once. */
651 if (priv->fdir_filter_list)
654 /* Create filters list. */
655 priv->fdir_filter_list =
656 rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);
658 if (priv->fdir_filter_list == NULL) {
661 ERROR("cannot allocate flow director filter list: %s",
666 LIST_INIT(priv->fdir_filter_list);
/*
 * Remove every filter from the list, destroying its verbs flow (when
 * active) and freeing the filter structure. The list itself survives.
 */
678 priv_fdir_filter_flush(struct priv *priv)
680 struct mlx5_fdir_filter *mlx5_fdir_filter;
/* Pop from the head until the list is empty. */
682 while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
683 struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
685 DEBUG("%p: flushing flow director filter %p",
686 (void *)priv, (void *)mlx5_fdir_filter);
687 LIST_REMOVE(mlx5_fdir_filter, next);
689 claim_zero(ibv_exp_destroy_flow(flow));
690 rte_free(mlx5_fdir_filter);
695 * Remove all flow director filters and delete list.
/*
 * Flush all filters, then free the list head itself and reset the
 * pointer so fdir_init_filters_list() can recreate it.
 */
701 priv_fdir_delete_filters_list(struct priv *priv)
703 priv_fdir_filter_flush(priv);
704 rte_free(priv->fdir_filter_list);
705 priv->fdir_filter_list = NULL;
709 * Disable flow director, remove all steering rules.
/*
 * Disable flow director: destroy every active steering rule (keeping
 * the filter entries so they can be re-enabled later), then destroy the
 * per-RX-queue flow director queues and the drop queue.
 */
715 priv_fdir_disable(struct priv *priv)
718 struct mlx5_fdir_filter *mlx5_fdir_filter;
720 /* Run on every flow director filter and destroy flow handle. */
721 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
722 struct ibv_exp_flow *flow;
724 /* Only valid elements should be in the list */
725 assert(mlx5_fdir_filter != NULL);
726 flow = mlx5_fdir_filter->flow;
728 /* Destroy flow handle */
730 claim_zero(ibv_exp_destroy_flow(flow));
/* Keep the entry; NULL flow marks it as merely disabled. */
731 mlx5_fdir_filter->flow = NULL;
735 /* Destroy flow director context in each RX queue. */
736 for (i = 0; (i != priv->rxqs_n); i++) {
737 struct rxq_ctrl *rxq_ctrl;
739 if (!(*priv->rxqs)[i])
741 rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
742 if (!rxq_ctrl->fdir_queue)
744 priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
745 rxq_ctrl->fdir_queue = NULL;
747 if (priv->fdir_drop_queue) {
748 priv_fdir_queue_destroy(priv, priv->fdir_drop_queue);
749 priv->fdir_drop_queue = NULL;
754 * Enable flow director, create steering rules.
/*
 * Re-enable flow director: walk the filter list and (re)install a
 * steering rule for each entry. Per-filter enable errors are not
 * propagated here.
 */
760 priv_fdir_enable(struct priv *priv)
762 struct mlx5_fdir_filter *mlx5_fdir_filter;
764 /* Run on every fdir filter and create flow handle */
765 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
766 /* Only valid elements should be in the list */
767 assert(mlx5_fdir_filter != NULL);
769 priv_fdir_filter_enable(priv, mlx5_fdir_filter);
774 * Find specific filter in list.
779 * Flow director filter to find.
782 * Filter element if found, otherwise NULL.
/*
 * Look up a filter by converting the DPDK filter to a descriptor and
 * comparing it byte-for-byte (memcmp) against each stored descriptor.
 * Relies on fdir_filter_to_flow_desc() zeroing the descriptor first so
 * padding/unused fields compare equal. Returns NULL when not found.
 */
784 static struct mlx5_fdir_filter *
785 priv_find_filter_in_list(struct priv *priv,
786 const struct rte_eth_fdir_filter *fdir_filter)
788 struct fdir_flow_desc desc;
789 struct mlx5_fdir_filter *mlx5_fdir_filter;
790 enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
792 /* Get flow director filter to look for. */
793 fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);
795 /* Look for the requested element. */
796 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
797 /* Only valid elements should be in the list. */
798 assert(mlx5_fdir_filter != NULL);
800 /* Return matching filter. */
801 if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
802 return mlx5_fdir_filter;
805 /* Filter not found */
810 * Add new flow director filter and store it in list.
815 * Flow director filter to add.
818 * 0 on success, errno value on failure.
/*
 * Add a new flow director filter: validate the target queue, reject
 * duplicates, allocate and populate the internal filter, insert it at
 * the list head, and enable it immediately when the device is started.
 */
821 priv_fdir_filter_add(struct priv *priv,
822 const struct rte_eth_fdir_filter *fdir_filter)
824 struct mlx5_fdir_filter *mlx5_fdir_filter;
825 enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
828 /* Validate queue number. */
829 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
830 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
834 /* Duplicate filters are currently unsupported. */
835 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
836 if (mlx5_fdir_filter != NULL) {
837 ERROR("filter already exists");
841 /* Create new flow director filter. */
843 rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
844 if (mlx5_fdir_filter == NULL) {
846 ERROR("cannot allocate flow director filter: %s",
851 /* Set action parameters. */
852 mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
853 mlx5_fdir_filter->behavior = fdir_filter->action.behavior;
855 /* Convert to mlx5 filter descriptor. */
856 fdir_filter_to_flow_desc(fdir_filter,
857 &mlx5_fdir_filter->desc, fdir_mode);
859 /* Insert new filter into list. */
860 LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);
862 DEBUG("%p: flow director filter %p added",
863 (void *)priv, (void *)mlx5_fdir_filter);
865 /* Enable filter immediately if device is started. */
867 err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
873 * Update queue for specific filter.
878 * Filter to be updated.
881 * 0 on success, errno value on failure.
/*
 * Update the target queue of an existing filter: destroy the old
 * steering rule, record the new queue, and re-enable when the device
 * is started. Falls back to priv_fdir_filter_add() when the filter
 * does not exist yet.
 */
884 priv_fdir_filter_update(struct priv *priv,
885 const struct rte_eth_fdir_filter *fdir_filter)
887 struct mlx5_fdir_filter *mlx5_fdir_filter;
889 /* Validate queue number. */
890 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
891 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
895 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
896 if (mlx5_fdir_filter != NULL) {
897 struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
900 /* Update queue number. */
901 mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
903 /* Destroy flow handle. */
905 claim_zero(ibv_exp_destroy_flow(flow));
906 mlx5_fdir_filter->flow = NULL;
908 DEBUG("%p: flow director filter %p updated",
909 (void *)priv, (void *)mlx5_fdir_filter);
911 /* Enable filter if device is started. */
913 err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
918 /* Filter not found, create it. */
919 DEBUG("%p: filter not found for update, creating new filter",
921 return priv_fdir_filter_add(priv, fdir_filter);
925 * Delete specific filter.
930 * Filter to be deleted.
933 * 0 on success, errno value on failure.
/*
 * Delete a filter: unlink it from the list, destroy its steering rule
 * (when active) and free it. Logs an error when no matching filter is
 * found.
 */
936 priv_fdir_filter_delete(struct priv *priv,
937 const struct rte_eth_fdir_filter *fdir_filter)
939 struct mlx5_fdir_filter *mlx5_fdir_filter;
941 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
942 if (mlx5_fdir_filter != NULL) {
943 struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
945 /* Remove element from list. */
946 LIST_REMOVE(mlx5_fdir_filter, next);
948 /* Destroy flow handle. */
950 claim_zero(ibv_exp_destroy_flow(flow));
951 mlx5_fdir_filter->flow = NULL;
954 DEBUG("%p: flow director filter %p deleted",
955 (void *)priv, (void *)mlx5_fdir_filter);
958 rte_free(mlx5_fdir_filter);
963 ERROR("%p: flow director delete failed, cannot find filter",
969 * Get flow director information.
973 * @param[out] fdir_info
974 * Resulting flow director information.
/*
 * Fill an rte_eth_fdir_info: report the configured mode and masks;
 * all capacity/flex-payload fields are zeroed since this driver does
 * not support flexible payload matching.
 */
977 priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
979 struct rte_eth_fdir_masks *mask =
980 &priv->dev->data->dev_conf.fdir_conf.mask;
982 fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
983 fdir_info->guarant_spc = 0;
985 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
987 fdir_info->max_flexpayload = 0;
988 fdir_info->flow_types_mask[0] = 0;
990 fdir_info->flex_payload_unit = 0;
991 fdir_info->max_flex_payload_segment_num = 0;
992 fdir_info->flex_payload_limit = 0;
993 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
997 * Deal with flow director operations.
1000 * Pointer to private structure.
1002 * Operation to perform.
1004 * Pointer to operation-specific structure.
1007 * 0 on success, errno value on failure.
/*
 * Dispatch an RTE_ETH_FILTER_FDIR operation (add/update/delete/flush/
 * info) to the matching handler. Only perfect and perfect-MAC/VLAN
 * modes are supported; other modes are rejected up front.
 */
1010 priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
1012 enum rte_fdir_mode fdir_mode =
1013 priv->dev->data->dev_conf.fdir_conf.mode;
1016 if (filter_op == RTE_ETH_FILTER_NOP)
1019 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
1020 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1021 ERROR("%p: flow director mode %d not supported",
1022 (void *)priv, fdir_mode);
/* NOTE(review): `arg` is cast implicitly to the operation-specific
 * structure; callers must pass the type matching filter_op. */
1026 switch (filter_op) {
1027 case RTE_ETH_FILTER_ADD:
1028 ret = priv_fdir_filter_add(priv, arg);
1030 case RTE_ETH_FILTER_UPDATE:
1031 ret = priv_fdir_filter_update(priv, arg);
1033 case RTE_ETH_FILTER_DELETE:
1034 ret = priv_fdir_filter_delete(priv, arg);
1036 case RTE_ETH_FILTER_FLUSH:
1037 priv_fdir_filter_flush(priv);
1039 case RTE_ETH_FILTER_INFO:
1040 priv_fdir_info_get(priv, arg);
1043 DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
/* rte_flow callback table returned for RTE_ETH_FILTER_GENERIC queries. */
1050 static const struct rte_flow_ops mlx5_flow_ops = {
1051 .validate = mlx5_flow_validate,
1052 .create = mlx5_flow_create,
1053 .destroy = mlx5_flow_destroy,
1054 .flush = mlx5_flow_flush,
1056 .isolate = mlx5_flow_isolate,
1060 * Manage filter operations.
1063 * Pointer to Ethernet device structure.
1064 * @param filter_type
1067 * Operation to perform.
1069 * Pointer to operation-specific structure.
1072 * 0 on success, negative errno value on failure.
1075 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1076 enum rte_filter_type filter_type,
1077 enum rte_filter_op filter_op,
1081 struct priv *priv = dev->data->dev_private;
1083 switch (filter_type) {
1084 case RTE_ETH_FILTER_GENERIC:
1085 if (filter_op != RTE_ETH_FILTER_GET)
1087 *(const void **)arg = &mlx5_flow_ops;
1089 case RTE_ETH_FILTER_FDIR:
1091 ret = priv_fdir_ctrl_func(priv, filter_op, arg);
1095 ERROR("%p: filter type (%d) not supported",
1096 (void *)dev, filter_type);