4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
43 #pragma GCC diagnostic ignored "-pedantic"
45 #include <infiniband/verbs.h>
47 #pragma GCC diagnostic error "-pedantic"
50 /* DPDK headers don't like -pedantic. */
52 #pragma GCC diagnostic ignored "-pedantic"
54 #include <rte_ether.h>
55 #include <rte_malloc.h>
56 #include <rte_ethdev.h>
57 #include <rte_common.h>
59 #pragma GCC diagnostic error "-pedantic"
63 #include "mlx5_rxtx.h"
/*
 * Flow director filter description: the mlx5-internal canonical form of a
 * DPDK struct rte_eth_fdir_filter, used both to build verbs flow specs and
 * to compare filters for overlap/equality.
 * NOTE(review): this excerpt is gap-sampled; the member declarations at
 * upstream lines 66-71 (ports, IP addresses, MAC, VLAN tag -- all referenced
 * by the code below) and the closing brace are not visible here -- verify
 * against the upstream file.
 */
65 struct fdir_flow_desc {
/* Hash RX queue type the rule steers through (ETH/IPV4/UDPV4/...). */
72 enum hash_rxq_type type;
/*
 * One flow director filter as stored in the per-port filter list.
 * NOTE(review): the struct's closing brace (upstream line 80) is missing
 * from this excerpt.
 */
75 struct mlx5_fdir_filter {
76 LIST_ENTRY(mlx5_fdir_filter) next; /* Linkage in priv->fdir_filter_list. */
77 uint16_t queue; /* Queue assigned to if FDIR match. */
78 struct fdir_flow_desc desc; /* Canonical filter description. */
79 struct ibv_exp_flow *flow; /* Verbs flow handle; NULL while disabled. */
/* Head type for the per-port list of flow director filters. */
82 LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
/*
 * NOTE(review): gap-sampled excerpt -- braces, the 'break;' statements in
 * the first switch, comment delimiters and a possible early return in
 * MAC/VLAN mode sit on sampled-out upstream lines. Do not treat their
 * absence below as bugs; verify against the upstream file.
 */
85 * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
87 * @param[in] fdir_filter
88 * DPDK filter structure to convert.
90 * Resulting mlx5 filter descriptor.
95 fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
96 struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
98 /* Initialize descriptor. */
/* Zeroing matters: priv_find_filter_in_list() compares whole descriptors
 * with memcmp(), so padding and unused fields must be deterministic. */
99 memset(desc, 0, sizeof(*desc));
/* VLAN TCI is copied unconditionally, whatever the filter mode. */
102 desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
104 /* Set MAC address. */
105 if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
106 rte_memcpy(desc->mac,
107 fdir_filter->input.flow.mac_vlan_flow.mac_addr.
/* MAC/VLAN mode matches on L2 only, so the descriptor type is ETH;
 * presumably the function returns here upstream (sampled-out lines). */
110 desc->type = HASH_RXQ_ETH;
/* Map the DPDK flow type onto the hash RX queue type used for steering. */
115 switch (fdir_filter->input.flow_type) {
116 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
117 desc->type = HASH_RXQ_UDPV4;
119 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
120 desc->type = HASH_RXQ_TCPV4;
122 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
123 desc->type = HASH_RXQ_IPV4;
125 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
126 desc->type = HASH_RXQ_UDPV6;
128 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
129 desc->type = HASH_RXQ_TCPV6;
131 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
132 desc->type = HASH_RXQ_IPV6;
138 /* Set flow values */
139 switch (fdir_filter->input.flow_type) {
140 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
141 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
142 desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
143 desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
/* Intentional fall-through: UDP/TCP filters also carry the IPv4
 * addresses copied by the OTHER case below. */
144 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
145 desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
146 desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
148 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
149 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
150 desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
151 desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
/* Same intentional fall-through pattern for IPv6 addresses. */
153 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
154 rte_memcpy(desc->src_ip,
155 fdir_filter->input.flow.ipv6_flow.src_ip,
156 sizeof(desc->src_ip));
157 rte_memcpy(desc->dst_ip,
158 fdir_filter->input.flow.ipv6_flow.dst_ip,
159 sizeof(desc->dst_ip));
/*
 * NOTE(review): gap-sampled -- the 'static int' storage line, the local
 * loop-index declaration, and the 'return 0;'/'return 1;' statements sit
 * on sampled-out upstream lines. Per the visible contract ("Nonzero if
 * descriptors overlap"), any masked-field mismatch presumably returns 0
 * and full agreement returns nonzero -- verify upstream.
 */
167 * Check if two flow descriptors overlap according to configured mask.
170 * Private structure that provides flow director mask.
172 * First flow descriptor to compare.
174 * Second flow descriptor to compare.
177 * Nonzero if descriptors overlap.
180 priv_fdir_overlap(const struct priv *priv,
181 const struct fdir_flow_desc *desc1,
182 const struct fdir_flow_desc *desc2)
/* Masks come from the port-wide fdir configuration, not per filter. */
184 const struct rte_eth_fdir_masks *mask =
185 &priv->dev->data->dev_conf.fdir_conf.mask;
/* Different steering types can never overlap. */
188 if (desc1->type != desc2->type)
190 /* Ignore non masked bits. */
191 for (i = 0; i != RTE_DIM(desc1->mac); ++i)
192 if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
193 (desc2->mac[i] & mask->mac_addr_byte_mask))
/* L4 ports compared under their respective masks. */
195 if (((desc1->src_port & mask->src_port_mask) !=
196 (desc2->src_port & mask->src_port_mask)) ||
197 ((desc1->dst_port & mask->dst_port_mask) !=
198 (desc2->dst_port & mask->dst_port_mask)))
/* Address comparison depends on the IP version implied by the type. */
200 switch (desc1->type) {
204 if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
205 (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
206 ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
207 (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
/* IPv6: compare all four 32-bit words of each address under the mask. */
213 for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
214 if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
215 (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
216 ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
217 (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
/*
 * NOTE(review): gap-sampled -- local declarations (loop index, err),
 * 'return EEXIST;' in the overlap loop, the priority updates, the
 * 'goto'/labels around plain-IP types, case labels of the inner switch,
 * and the final error/success returns sit on sampled-out upstream lines.
 */
227 * Create flow director steering rule for a specific filter.
231 * @param mlx5_fdir_filter
232 * Filter to create a steering rule for.
234 * Flow director queue for matching packets.
237 * 0 on success, errno value on failure.
240 priv_fdir_flow_add(struct priv *priv,
241 struct mlx5_fdir_filter *mlx5_fdir_filter,
242 struct fdir_queue *fdir_queue)
244 struct ibv_exp_flow *flow;
245 struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
246 enum rte_fdir_mode fdir_mode =
247 priv->dev->data->dev_conf.fdir_conf.mode;
248 struct rte_eth_fdir_masks *mask =
249 &priv->dev->data->dev_conf.fdir_conf.mask;
/* Stack buffer sized by a first priv_flow_attr() query; holds the verbs
 * flow attribute header immediately followed by the flow specs. */
250 FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
251 struct ibv_exp_flow_attr *attr = &data->attr;
252 uintptr_t spec_offset = (uintptr_t)&data->spec;
253 struct ibv_exp_flow_spec_eth *spec_eth;
254 struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
255 struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
256 struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
257 struct mlx5_fdir_filter *iter_fdir_filter;
260 /* Abort if an existing flow overlaps this one to avoid packet
261 * duplication, even if it targets another queue. */
262 LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
263 if ((iter_fdir_filter != mlx5_fdir_filter) &&
264 (iter_fdir_filter->flow != NULL) &&
265 (priv_fdir_overlap(priv,
266 &mlx5_fdir_filter->desc,
267 &iter_fdir_filter->desc)))
271 * No padding must be inserted by the compiler between attr and spec.
272 * This layout is expected by libibverbs.
274 assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);
/* Second call actually fills the attribute header for this flow type. */
275 priv_flow_attr(priv, attr, sizeof(data), desc->type);
277 /* Set Ethernet spec */
278 spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;
280 /* The first specification must be Ethernet. */
281 assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
282 assert(spec_eth->size == sizeof(*spec_eth));
/* Match VLAN TCI under the configured mask. */
285 spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
286 spec_eth->mask.vlan_tag = mask->vlan_tci_mask;
288 /* Update priority */
/* MAC/VLAN mode additionally matches the destination MAC byte-by-byte. */
291 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
293 for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
294 spec_eth->val.dst_mac[i] =
295 desc->mac[i] & mask->mac_addr_byte_mask;
296 spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
/* Append the L3 spec appropriate for the steering type; case labels
 * sit on sampled-out lines. */
301 switch (desc->type) {
305 spec_offset += spec_eth->size;
308 spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
310 /* The second specification must be IP. */
311 assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
312 assert(spec_ipv4->size == sizeof(*spec_ipv4));
314 spec_ipv4->val.src_ip =
315 desc->src_ip[0] & mask->ipv4_mask.src_ip;
316 spec_ipv4->val.dst_ip =
317 desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
318 spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
319 spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
321 /* Update priority */
/* Plain IPv4 rules carry no L4 spec; the sampled-out body of this
 * 'if' presumably jumps straight to flow creation -- verify. */
324 if (desc->type == HASH_RXQ_IPV4)
327 spec_offset += spec_ipv4->size;
332 spec_offset += spec_eth->size;
335 spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
337 /* The second specification must be IP. */
338 assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
339 assert(spec_ipv6->size == sizeof(*spec_ipv6));
/* Apply the IPv6 mask word-by-word to both addresses. */
341 for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
342 ((uint32_t *)spec_ipv6->val.src_ip)[i] =
343 desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
344 ((uint32_t *)spec_ipv6->val.dst_ip)[i] =
345 desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
347 rte_memcpy(spec_ipv6->mask.src_ip,
348 mask->ipv6_mask.src_ip,
349 sizeof(spec_ipv6->mask.src_ip));
350 rte_memcpy(spec_ipv6->mask.dst_ip,
351 mask->ipv6_mask.dst_ip,
352 sizeof(spec_ipv6->mask.dst_ip));
354 /* Update priority */
/* Plain IPv6 rules likewise skip the L4 spec -- verify sampled-out body. */
357 if (desc->type == HASH_RXQ_IPV6)
360 spec_offset += spec_ipv6->size;
/* Default case of the switch: unexpected steering type. */
363 ERROR("invalid flow attribute type");
367 /* Set TCP/UDP flow specification. */
368 spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;
370 /* The third specification must be TCP/UDP. */
371 assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
372 spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
373 assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));
375 spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
376 spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
377 spec_tcp_udp->mask.src_port = mask->src_port_mask;
378 spec_tcp_udp->mask.dst_port = mask->dst_port_mask;
380 /* Update priority */
/* Hand the finished attribute block to verbs on the fdir queue's QP. */
386 flow = ibv_exp_create_flow(fdir_queue->qp, attr);
388 /* It's not clear whether errno is always set in this case. */
389 ERROR("%p: flow director configuration failed, errno=%d: %s",
391 (errno ? strerror(errno) : "Unknown error"));
397 DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
/* Non-NULL flow marks the filter as enabled. */
398 mlx5_fdir_filter->flow = flow;
/*
 * NOTE(review): gap-sampled -- the function signature's storage line,
 * braces, and possible NULL-guards around wq/cq destruction sit on
 * sampled-out upstream lines.
 */
403 * Destroy a flow director queue.
406 * Flow director queue to be destroyed.
409 priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
411 struct mlx5_fdir_filter *fdir_filter;
413 /* Disable filter flows still applying to this queue. */
414 LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
415 unsigned int idx = fdir_filter->queue;
416 struct rxq_ctrl *rxq_ctrl =
417 container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
419 assert(idx < priv->rxqs_n);
/* Only tear down flows steered through this particular queue;
 * the filters themselves stay in the list for later re-enabling. */
420 if (fdir_queue == rxq_ctrl->fdir_queue &&
421 fdir_filter->flow != NULL) {
422 claim_zero(ibv_exp_destroy_flow(fdir_filter->flow));
423 fdir_filter->flow = NULL;
/* Release verbs resources in reverse creation order: QP, indirection
 * table, WQ, CQ. */
426 assert(fdir_queue->qp);
427 claim_zero(ibv_destroy_qp(fdir_queue->qp));
428 assert(fdir_queue->ind_table);
429 claim_zero(ibv_exp_destroy_rwq_ind_table(fdir_queue->ind_table));
431 claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
433 claim_zero(ibv_destroy_cq(fdir_queue->cq));
/* Poison the structure before freeing to expose use-after-free. */
435 memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
437 rte_free(fdir_queue);
/*
 * NOTE(review): gap-sampled -- the NULL-check/'goto error' statements
 * after each allocation, the error labels, the success 'return', and the
 * conditional that decides between the caller-supplied WQ and a private
 * CQ/WQ pair sit on sampled-out upstream lines.
 */
441 * Create a flow director queue.
446 * Work queue to route matched packets to, NULL if one needs to
450 * Related flow director queue on success, NULL otherwise.
452 static struct fdir_queue *
453 priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,
456 struct fdir_queue *fdir_queue;
/* Zero-initialized so the error path can destroy only what was created. */
458 fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
461 ERROR("cannot allocate flow director queue");
/* Per the doc above, when 'wq' is NULL a private CQ and drop-style WQ
 * are created here -- presumably guarded by a sampled-out 'if'. */
467 fdir_queue->cq = ibv_exp_create_cq(
468 priv->ctx, 1, NULL, NULL, 0,
469 &(struct ibv_exp_cq_init_attr){
472 if (!fdir_queue->cq) {
473 ERROR("cannot create flow director CQ");
476 fdir_queue->wq = ibv_exp_create_wq(
478 &(struct ibv_exp_wq_init_attr){
479 .wq_type = IBV_EXP_WQT_RQ,
483 .cq = fdir_queue->cq,
485 if (!fdir_queue->wq) {
486 ERROR("cannot create flow director WQ");
/* Single-entry indirection table pointing at the target WQ. */
491 fdir_queue->ind_table = ibv_exp_create_rwq_ind_table(
493 &(struct ibv_exp_rwq_ind_table_init_attr){
495 .log_ind_tbl_size = 0,
499 if (!fdir_queue->ind_table) {
500 ERROR("cannot create flow director indirection table");
/* Raw-packet hash QP steering into the indirection table; hash fields
 * mask of 0 means no RSS spreading -- everything lands on the one WQ. */
503 fdir_queue->qp = ibv_exp_create_qp(
505 &(struct ibv_exp_qp_init_attr){
506 .qp_type = IBV_QPT_RAW_PACKET,
508 IBV_EXP_QP_INIT_ATTR_PD |
509 IBV_EXP_QP_INIT_ATTR_PORT |
510 IBV_EXP_QP_INIT_ATTR_RX_HASH,
512 .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
514 IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
515 .rx_hash_key_len = rss_hash_default_key_len,
516 .rx_hash_key = rss_hash_default_key,
517 .rx_hash_fields_mask = 0,
518 .rwq_ind_tbl = fdir_queue->ind_table,
520 .port_num = priv->port,
522 if (!fdir_queue->qp) {
523 ERROR("cannot create flow director hash RX QP");
/* Error path: QP creation is the last step, so it must not exist here;
 * unwind whatever was created, in reverse order, then free. */
529 assert(!fdir_queue->qp);
530 if (fdir_queue->ind_table)
531 claim_zero(ibv_exp_destroy_rwq_ind_table
532 (fdir_queue->ind_table));
534 claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
536 claim_zero(ibv_destroy_cq(fdir_queue->cq));
537 rte_free(fdir_queue);
/*
 * NOTE(review): gap-sampled -- the closing brace and the final
 * 'return fdir_queue;' sit on sampled-out upstream lines.
 */
542 * Get flow director queue for a specific RX queue, create it in case
551 * Related flow director queue on success, NULL otherwise.
553 static struct fdir_queue *
554 priv_get_fdir_queue(struct priv *priv, uint16_t idx)
556 struct rxq_ctrl *rxq_ctrl =
557 container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
558 struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;
560 assert(rxq_ctrl->wq);
/* Lazily create the fdir queue on first use, reusing the RX queue's WQ,
 * and cache it on the rxq_ctrl for subsequent filters. */
561 if (fdir_queue == NULL) {
562 fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
564 rxq_ctrl->fdir_queue = fdir_queue;
/*
 * NOTE(review): gap-sampled -- the early 'return 0;' for already-enabled
 * filters and the error return after queue-creation failure sit on
 * sampled-out upstream lines.
 */
570 * Enable flow director filter and create steering rules.
574 * @param mlx5_fdir_filter
575 * Filter to create steering rule for.
578 * 0 on success, errno value on failure.
581 priv_fdir_filter_enable(struct priv *priv,
582 struct mlx5_fdir_filter *mlx5_fdir_filter)
584 struct fdir_queue *fdir_queue;
586 /* Check if flow already exists. */
/* Non-NULL flow means the steering rule is already programmed; nothing
 * to do. */
587 if (mlx5_fdir_filter->flow != NULL)
590 /* Get fdir_queue for specific queue. */
591 fdir_queue = priv_get_fdir_queue(priv, mlx5_fdir_filter->queue);
593 if (fdir_queue == NULL) {
594 ERROR("failed to create flow director rxq for queue %d",
595 mlx5_fdir_filter->queue);
/* Delegate actual verbs flow creation (and overlap checking). */
600 return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
/*
 * NOTE(review): gap-sampled -- the early 'return 0;' for an existing
 * list, the errno capture used by the ERROR message, and the final
 * return(s) sit on sampled-out upstream lines.
 */
604 * Initialize flow director filters list.
610 * 0 on success, errno value on failure.
613 fdir_init_filters_list(struct priv *priv)
615 /* Filter list initialization should be done only once. */
616 if (priv->fdir_filter_list)
619 /* Create filters list. */
620 priv->fdir_filter_list =
621 rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);
623 if (priv->fdir_filter_list == NULL) {
626 ERROR("cannot allocate flow director filter list: %s",
/* Empty-list initialization of the BSD queue(3) LIST head. */
631 LIST_INIT(priv->fdir_filter_list);
/*
 * Flush all flow director filters: pop every entry off the list,
 * destroy its verbs flow (the NULL-guard around the destroy sits on a
 * sampled-out upstream line) and free the filter. The list head itself
 * is left allocated and empty.
 */
643 priv_fdir_filter_flush(struct priv *priv)
645 struct mlx5_fdir_filter *mlx5_fdir_filter;
/* Repeatedly take the head so removal during iteration is safe. */
647 while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
648 struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
650 DEBUG("%p: flushing flow director filter %p",
651 (void *)priv, (void *)mlx5_fdir_filter);
652 LIST_REMOVE(mlx5_fdir_filter, next);
654 claim_zero(ibv_exp_destroy_flow(flow));
655 rte_free(mlx5_fdir_filter);
660 * Remove all flow director filters and delete list.
666 priv_fdir_delete_filters_list(struct priv *priv)
/* Flush empties the list; then release the list head allocated by
 * fdir_init_filters_list() and clear the pointer so a later init can
 * recreate it. */
668 priv_fdir_filter_flush(priv);
669 rte_free(priv->fdir_filter_list);
670 priv->fdir_filter_list = NULL;
/*
 * NOTE(review): gap-sampled -- the loop-index declaration, the NULL-flow
 * guard inside the first loop, and the 'continue;' in the second loop sit
 * on sampled-out upstream lines.
 */
674 * Disable flow director, remove all steering rules.
680 priv_fdir_disable(struct priv *priv)
683 struct mlx5_fdir_filter *mlx5_fdir_filter;
685 /* Run on every flow director filter and destroy flow handle. */
/* Filters stay in the list (flow set to NULL) so priv_fdir_enable()
 * can re-program them later. */
686 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
687 struct ibv_exp_flow *flow;
689 /* Only valid elements should be in the list */
690 assert(mlx5_fdir_filter != NULL);
691 flow = mlx5_fdir_filter->flow;
693 /* Destroy flow handle */
695 claim_zero(ibv_exp_destroy_flow(flow));
696 mlx5_fdir_filter->flow = NULL;
700 /* Destroy flow director context in each RX queue. */
701 for (i = 0; (i != priv->rxqs_n); i++) {
702 struct rxq_ctrl *rxq_ctrl =
703 container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
/* Skip RX queues that never had an fdir queue lazily created. */
705 if (!rxq_ctrl->fdir_queue)
707 priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
708 rxq_ctrl->fdir_queue = NULL;
713 * Enable flow director, create steering rules.
719 priv_fdir_enable(struct priv *priv)
721 struct mlx5_fdir_filter *mlx5_fdir_filter;
723 /* Run on every fdir filter and create flow handle */
724 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
725 /* Only valid elements should be in the list */
726 assert(mlx5_fdir_filter != NULL);
/* Best-effort: the return value is ignored here, so a filter that
 * fails to enable is skipped rather than aborting the others. */
728 priv_fdir_filter_enable(priv, mlx5_fdir_filter);
/*
 * NOTE(review): gap-sampled -- the final 'return NULL;' after "Filter not
 * found" sits on a sampled-out upstream line.
 */
733 * Find specific filter in list.
738 * Flow director filter to find.
741 * Filter element if found, otherwise NULL.
743 static struct mlx5_fdir_filter *
744 priv_find_filter_in_list(struct priv *priv,
745 const struct rte_eth_fdir_filter *fdir_filter)
747 struct fdir_flow_desc desc;
748 struct mlx5_fdir_filter *mlx5_fdir_filter;
749 enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
751 /* Get flow director filter to look for. */
/* Canonicalize the request first so a plain memcmp() is a valid
 * equality test (descriptors are fully memset() before filling). */
752 fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);
754 /* Look for the requested element. */
755 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
756 /* Only valid elements should be in the list. */
757 assert(mlx5_fdir_filter != NULL);
759 /* Return matching filter. */
760 if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
761 return mlx5_fdir_filter;
764 /* Filter not found */
/*
 * NOTE(review): gap-sampled -- the EINVAL/EEXIST/ENOMEM returns, the
 * allocation's left-hand side, the errno capture for the ERROR message,
 * the 'if (priv->started)'-style guard before enabling, and the final
 * return sit on sampled-out upstream lines.
 */
769 * Add new flow director filter and store it in list.
774 * Flow director filter to add.
777 * 0 on success, errno value on failure.
780 priv_fdir_filter_add(struct priv *priv,
781 const struct rte_eth_fdir_filter *fdir_filter)
783 struct mlx5_fdir_filter *mlx5_fdir_filter;
784 enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
787 /* Validate queue number. */
788 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
789 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
793 /* Duplicate filters are currently unsupported. */
794 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
795 if (mlx5_fdir_filter != NULL) {
796 ERROR("filter already exists");
800 /* Create new flow director filter. */
802 rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
803 if (mlx5_fdir_filter == NULL) {
805 ERROR("cannot allocate flow director filter: %s",
/* Record the target RX queue for the match action. */
811 mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
813 /* Convert to mlx5 filter descriptor. */
814 fdir_filter_to_flow_desc(fdir_filter,
815 &mlx5_fdir_filter->desc, fdir_mode);
817 /* Insert new filter into list. */
818 LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);
820 DEBUG("%p: flow director filter %p added",
821 (void *)priv, (void *)mlx5_fdir_filter);
823 /* Enable filter immediately if device is started. */
825 err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
/*
 * NOTE(review): gap-sampled -- the EINVAL return for a bad queue, the
 * 'err' declaration, the NULL-flow guard before destroy, the started-only
 * guard before re-enabling, and 'return err;' sit on sampled-out
 * upstream lines.
 */
831 * Update queue for specific filter.
836 * Filter to be updated.
839 * 0 on success, errno value on failure.
842 priv_fdir_filter_update(struct priv *priv,
843 const struct rte_eth_fdir_filter *fdir_filter)
845 struct mlx5_fdir_filter *mlx5_fdir_filter;
847 /* Validate queue number. */
848 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
849 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
853 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
854 if (mlx5_fdir_filter != NULL) {
855 struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
858 /* Update queue number. */
859 mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
861 /* Destroy flow handle. */
/* The old rule steers to the old queue, so it must be torn down and
 * re-created rather than modified in place. */
863 claim_zero(ibv_exp_destroy_flow(flow));
864 mlx5_fdir_filter->flow = NULL;
866 DEBUG("%p: flow director filter %p updated",
867 (void *)priv, (void *)mlx5_fdir_filter);
869 /* Enable filter if device is started. */
871 err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
876 /* Filter not found, create it. */
877 DEBUG("%p: filter not found for update, creating new filter",
/* Update-on-missing falls back to plain add (upsert semantics). */
879 return priv_fdir_filter_add(priv, fdir_filter);
/*
 * NOTE(review): gap-sampled -- the NULL-flow guard before destroy, the
 * success 'return 0;', and the final not-found error return sit on
 * sampled-out upstream lines.
 */
883 * Delete specific filter.
888 * Filter to be deleted.
891 * 0 on success, errno value on failure.
894 priv_fdir_filter_delete(struct priv *priv,
895 const struct rte_eth_fdir_filter *fdir_filter)
897 struct mlx5_fdir_filter *mlx5_fdir_filter;
899 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
900 if (mlx5_fdir_filter != NULL) {
901 struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
903 /* Remove element from list. */
904 LIST_REMOVE(mlx5_fdir_filter, next);
906 /* Destroy flow handle. */
908 claim_zero(ibv_exp_destroy_flow(flow));
909 mlx5_fdir_filter->flow = NULL;
912 DEBUG("%p: flow director filter %p deleted",
913 (void *)priv, (void *)mlx5_fdir_filter);
/* Filter memory is owned by the list; freed on successful removal. */
916 rte_free(mlx5_fdir_filter);
921 ERROR("%p: flow director delete failed, cannot find filter",
927 * Get flow director information.
931 * @param[out] fdir_info
932 * Resulting flow director information.
935 priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
937 struct rte_eth_fdir_masks *mask =
938 &priv->dev->data->dev_conf.fdir_conf.mask;
/* Report the configured mode and mask; all capacity/flex-payload
 * capabilities are explicitly zeroed as unsupported by this PMD. */
940 fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
941 fdir_info->guarant_spc = 0;
943 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
945 fdir_info->max_flexpayload = 0;
946 fdir_info->flow_types_mask[0] = 0;
948 fdir_info->flex_payload_unit = 0;
949 fdir_info->max_flex_payload_segment_num = 0;
950 fdir_info->flex_payload_limit = 0;
951 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
/*
 * NOTE(review): gap-sampled -- the 'ret' declaration, the 'return 0;'
 * for NOP, the EINVAL return for unsupported modes, the 'break;' after
 * each dispatch case, and the final 'return ret;' sit on sampled-out
 * upstream lines.
 */
955 * Deal with flow director operations.
958 * Pointer to private structure.
960 * Operation to perform.
962 * Pointer to operation-specific structure.
965 * 0 on success, errno value on failure.
968 priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
970 enum rte_fdir_mode fdir_mode =
971 priv->dev->data->dev_conf.fdir_conf.mode;
974 if (filter_op == RTE_ETH_FILTER_NOP)
/* Only perfect-match modes are implemented by this driver. */
977 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
978 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
979 ERROR("%p: flow director mode %d not supported",
980 (void *)priv, fdir_mode);
/* Dispatch to the matching filter operation; 'arg' is interpreted per
 * operation (rte_eth_fdir_filter for add/update/delete, rte_eth_fdir_info
 * for info). */
985 case RTE_ETH_FILTER_ADD:
986 ret = priv_fdir_filter_add(priv, arg);
988 case RTE_ETH_FILTER_UPDATE:
989 ret = priv_fdir_filter_update(priv, arg);
991 case RTE_ETH_FILTER_DELETE:
992 ret = priv_fdir_filter_delete(priv, arg);
994 case RTE_ETH_FILTER_FLUSH:
995 priv_fdir_filter_flush(priv);
997 case RTE_ETH_FILTER_INFO:
998 priv_fdir_info_get(priv, arg);
1001 DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
1009 * Manage filter operations.
1012 * Pointer to Ethernet device structure.
1013 * @param filter_type
1016 * Operation to perform.
1018 * Pointer to operation-specific structure.
1021 * 0 on success, negative errno value on failure.
1024 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1025 enum rte_filter_type filter_type,
1026 enum rte_filter_op filter_op,
1030 struct priv *priv = dev->data->dev_private;
1032 switch (filter_type) {
1033 case RTE_ETH_FILTER_FDIR:
1035 ret = priv_fdir_ctrl_func(priv, filter_op, arg);
1039 ERROR("%p: filter type (%d) not supported",
1040 (void *)dev, filter_type);