4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
43 #pragma GCC diagnostic ignored "-pedantic"
45 #include <infiniband/verbs.h>
47 #pragma GCC diagnostic error "-pedantic"
50 /* DPDK headers don't like -pedantic. */
52 #pragma GCC diagnostic ignored "-pedantic"
54 #include <rte_ether.h>
55 #include <rte_malloc.h>
56 #include <rte_ethdev.h>
57 #include <rte_common.h>
59 #pragma GCC diagnostic error "-pedantic"
63 #include "mlx5_rxtx.h"
/* Normalized flow-director match descriptor, filled from a DPDK
 * rte_eth_fdir_filter by fdir_filter_to_flow_desc().
 * NOTE(review): this extraction is truncated — original line numbers jump
 * from 65 to 72, so the match-field members (MAC, VLAN tag, IPs, ports)
 * referenced elsewhere in this file are not visible here. Verify the full
 * member list against version control. */
65 struct fdir_flow_desc {
/* Hash RX queue type this filter targets (e.g. HASH_RXQ_ETH/IPV4/UDPV4). */
72 	enum hash_rxq_type type;
/* One flow-director filter as stored in the per-port filter list.
 * NOTE(review): the closing "};" of this struct is not visible in this
 * truncated extraction (line numbers jump 79 -> 82); confirm upstream. */
75 struct mlx5_fdir_filter {
/* Linkage in priv->fdir_filter_list (BSD queue(3) LIST). */
76 	LIST_ENTRY(mlx5_fdir_filter) next;
77 	uint16_t queue; /* Queue assigned to if FDIR match. */
/* Normalized match fields derived from the DPDK filter. */
78 	struct fdir_flow_desc desc;
/* Verbs flow handle; NULL while the steering rule is not installed. */
79 	struct ibv_exp_flow *flow;
/* Head type for the per-port list of flow-director filters. */
82 LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
85  * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
87  * @param[in] fdir_filter
88  *   DPDK filter structure to convert.
90  *   Resulting mlx5 filter descriptor.
/* NOTE(review): extraction is truncated here — original line numbers are
 * non-contiguous, so the "static void" return line, braces, "break;"
 * statements between the first switch's cases, and possibly a "return"
 * after the MAC/VLAN branch are missing from view. Comments below describe
 * only what the visible lines establish; verify flow against upstream. */
95 fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
96 			 struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
98 	/* Initialize descriptor. */
99 	memset(desc, 0, sizeof(*desc));
/* VLAN TCI is taken from the extended flow fields regardless of mode. */
102 	desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
104 	/* Set MAC address. */
/* MAC/VLAN perfect mode matches on destination MAC + VLAN only. */
105 	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
106 		rte_memcpy(desc->mac,
107 			   fdir_filter->input.flow.mac_vlan_flow.mac_addr.
110 		desc->type = HASH_RXQ_ETH;
/* Map the DPDK flow type onto the matching hash RX queue type.
 * NOTE(review): gaps in original numbering (117->119, 120->122, ...)
 * indicate each case originally ended with "break;" — confirm. */
115 	switch (fdir_filter->input.flow_type) {
116 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
117 		desc->type = HASH_RXQ_UDPV4;
119 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
120 		desc->type = HASH_RXQ_TCPV4;
122 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
123 		desc->type = HASH_RXQ_IPV4;
/* IPv6 specs are only available when libibverbs provides them. */
125 #ifdef HAVE_FLOW_SPEC_IPV6
126 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
127 		desc->type = HASH_RXQ_UDPV6;
129 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
130 		desc->type = HASH_RXQ_TCPV6;
132 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
133 		desc->type = HASH_RXQ_IPV6;
135 #endif /* HAVE_FLOW_SPEC_IPV6 */
140 	/* Set flow values */
/* Second switch copies match values; UDP/TCP cases set ports and then
 * presumably fall through to the *_OTHER case to also copy the IPs
 * (udp4_flow/tcp4_flow share the layout of ip4_flow) — TODO confirm
 * the fallthrough is intentional in the full source. */
141 	switch (fdir_filter->input.flow_type) {
142 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
143 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
144 		desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
145 		desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
146 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
147 		desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
148 		desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
150 #ifdef HAVE_FLOW_SPEC_IPV6
151 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
152 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
153 		desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
154 		desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
156 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
/* IPv6 addresses are 128-bit arrays; copy whole fields. */
157 		rte_memcpy(desc->src_ip,
158 			   fdir_filter->input.flow.ipv6_flow.src_ip,
159 			   sizeof(desc->src_ip));
160 		rte_memcpy(desc->dst_ip,
161 			   fdir_filter->input.flow.ipv6_flow.dst_ip,
162 			   sizeof(desc->dst_ip));
164 #endif /* HAVE_FLOW_SPEC_IPV6 */
171  * Check if two flow descriptors overlap according to configured mask.
174  *   Private structure that provides flow director mask.
176  *   First flow descriptor to compare.
178  *   Second flow descriptor to compare.
181  *   Nonzero if descriptors overlap.
/* NOTE(review): truncated extraction — the "static int" line, braces,
 * "return" statements and the declaration of loop index "i" are missing
 * from view here; verify against upstream before editing. */
184 priv_fdir_overlap(const struct priv *priv,
185 		  const struct fdir_flow_desc *desc1,
186 		  const struct fdir_flow_desc *desc2)
/* The comparison mask is the one the application configured at device
 * setup time (dev_conf.fdir_conf.mask). */
188 	const struct rte_eth_fdir_masks *mask =
189 		&priv->dev->data->dev_conf.fdir_conf.mask;
/* Different hash RX queue types can never overlap. */
192 	if (desc1->type != desc2->type)
194 	/* Ignore non masked bits. */
/* Every MAC byte is masked with the same single-byte mask. */
195 	for (i = 0; i != RTE_DIM(desc1->mac); ++i)
196 		if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
197 		    (desc2->mac[i] & mask->mac_addr_byte_mask))
/* L4 ports compared under their respective masks. */
199 	if (((desc1->src_port & mask->src_port_mask) !=
200 	     (desc2->src_port & mask->src_port_mask)) ||
201 	    ((desc1->dst_port & mask->dst_port_mask) !=
202 	     (desc2->dst_port & mask->dst_port_mask)))
/* IP comparison depends on address family (case labels for the IPv4
 * and IPv6 hash types are missing from this truncated view). */
204 	switch (desc1->type) {
208 		if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
209 		     (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
210 		    ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
211 		     (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
214 #ifdef HAVE_FLOW_SPEC_IPV6
/* IPv6: compare all 32-bit words of both addresses under the mask. */
218 		for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
219 			if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
220 			     (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
221 			    ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
222 			     (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
225 #endif /* HAVE_FLOW_SPEC_IPV6 */
233  * Create flow director steering rule for a specific filter.
237  * @param mlx5_fdir_filter
238  *   Filter to create a steering rule for.
240  *   Flow director queue for matching packets.
243  *   0 on success, errno value on failure.
/* NOTE(review): heavily truncated extraction — return-type line, braces,
 * early "return EEXIST"-style exits, priority assignments, case labels of
 * the type switch and the post-create error path are all missing from
 * view (non-contiguous original numbering). Treat comments below as
 * describing only the visible statements. */
246 priv_fdir_flow_add(struct priv *priv,
247 		   struct mlx5_fdir_filter *mlx5_fdir_filter,
248 		   struct fdir_queue *fdir_queue)
250 	struct ibv_exp_flow *flow;
251 	struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
252 	enum rte_fdir_mode fdir_mode =
253 		priv->dev->data->dev_conf.fdir_conf.mode;
254 	struct rte_eth_fdir_masks *mask =
255 		&priv->dev->data->dev_conf.fdir_conf.mask;
/* Allocate an on-stack attr+spec buffer sized by priv_flow_attr()
 * for this hash RX queue type. */
256 	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
257 	struct ibv_exp_flow_attr *attr = &data->attr;
/* spec_offset walks the specification area; each spec is appended
 * immediately after the previous one. */
258 	uintptr_t spec_offset = (uintptr_t)&data->spec;
259 	struct ibv_exp_flow_spec_eth *spec_eth;
260 	struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
261 #ifdef HAVE_FLOW_SPEC_IPV6
262 	struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
263 #endif /* HAVE_FLOW_SPEC_IPV6 */
264 	struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
265 	struct mlx5_fdir_filter *iter_fdir_filter;
268 	/* Abort if an existing flow overlaps this one to avoid packet
269 	 * duplication, even if it targets another queue. */
270 	LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
271 		if ((iter_fdir_filter != mlx5_fdir_filter) &&
272 		    (iter_fdir_filter->flow != NULL) &&
273 		    (priv_fdir_overlap(priv,
274 				       &mlx5_fdir_filter->desc,
275 				       &iter_fdir_filter->desc)))
279 	 * No padding must be inserted by the compiler between attr and spec.
280 	 * This layout is expected by libibverbs.
282 	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);
/* Second call actually fills in the attribute header. */
283 	priv_flow_attr(priv, attr, sizeof(data), desc->type);
285 	/* Set Ethernet spec */
286 	spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;
288 	/* The first specification must be Ethernet. */
289 	assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
290 	assert(spec_eth->size == sizeof(*spec_eth));
/* Match VLAN TCI under the configured mask. */
293 	spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
294 	spec_eth->mask.vlan_tag = mask->vlan_tci_mask;
296 	/* Update priority */
/* MAC/VLAN mode additionally matches the destination MAC, byte by
 * byte under mac_addr_byte_mask. */
299 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
301 		for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
302 			spec_eth->val.dst_mac[i] =
303 				desc->mac[i] & mask->mac_addr_byte_mask;
304 			spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
/* Append the L3 spec appropriate for the descriptor's type (the case
 * labels are missing from this truncated view). */
309 	switch (desc->type) {
313 		spec_offset += spec_eth->size;
316 		spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
318 		/* The second specification must be IP. */
319 		assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
320 		assert(spec_ipv4->size == sizeof(*spec_ipv4));
322 		spec_ipv4->val.src_ip =
323 			desc->src_ip[0] & mask->ipv4_mask.src_ip;
324 		spec_ipv4->val.dst_ip =
325 			desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
326 		spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
327 		spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
329 		/* Update priority */
/* Plain IPv4 (no L4) rules stop here; the body of this branch
 * (presumably a "goto"/assignment before create) is not visible. */
332 		if (desc->type == HASH_RXQ_IPV4)
335 		spec_offset += spec_ipv4->size;
337 #ifdef HAVE_FLOW_SPEC_IPV6
341 		spec_offset += spec_eth->size;
344 		spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
346 		/* The second specification must be IP. */
347 		assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
348 		assert(spec_ipv6->size == sizeof(*spec_ipv6));
/* Copy masked IPv6 addresses word by word into the spec. */
350 		for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
351 			((uint32_t *)spec_ipv6->val.src_ip)[i] =
352 				desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
353 			((uint32_t *)spec_ipv6->val.dst_ip)[i] =
354 				desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
356 		rte_memcpy(spec_ipv6->mask.src_ip,
357 			   mask->ipv6_mask.src_ip,
358 			   sizeof(spec_ipv6->mask.src_ip));
359 		rte_memcpy(spec_ipv6->mask.dst_ip,
360 			   mask->ipv6_mask.dst_ip,
361 			   sizeof(spec_ipv6->mask.dst_ip));
363 		/* Update priority */
366 		if (desc->type == HASH_RXQ_IPV6)
369 		spec_offset += spec_ipv6->size;
371 #endif /* HAVE_FLOW_SPEC_IPV6 */
/* default: unsupported descriptor type. */
373 		ERROR("invalid flow attribute type");
377 	/* Set TCP/UDP flow specification. */
378 	spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;
380 	/* The third specification must be TCP/UDP. */
381 	assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
382 	       spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
383 	assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));
385 	spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
386 	spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
387 	spec_tcp_udp->mask.src_port = mask->src_port_mask;
388 	spec_tcp_udp->mask.dst_port = mask->dst_port_mask;
390 	/* Update priority */
/* Install the steering rule on the flow-director queue's QP. */
396 	flow = ibv_exp_create_flow(fdir_queue->qp, attr);
398 		/* It's not clear whether errno is always set in this case. */
399 		ERROR("%p: flow director configuration failed, errno=%d: %s",
401 		      (errno ? strerror(errno) : "Unknown error"));
407 	DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
/* Record the handle so enable/disable/delete can find it later. */
408 	mlx5_fdir_filter->flow = flow;
413  * Get flow director queue for a specific RX queue, create it in case
422  *   Related flow director queue on success, NULL otherwise.
/* NOTE(review): truncated extraction — braces, "return fdir_queue;" exits,
 * the second argument of ibv_exp_create_rwq_ind_table(), some designated
 * initializers and the error-label lines are missing from view. */
424 static struct fdir_queue *
425 priv_get_fdir_queue(struct priv *priv, uint16_t idx)
426 	struct rxq_ctrl *rxq_ctrl =
428 		container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
/* The fdir queue object is embedded in the RX queue control struct. */
429 	struct fdir_queue *fdir_queue = &rxq_ctrl->fdir_queue;
430 	struct ibv_exp_rwq_ind_table *ind_table = NULL;
431 	struct ibv_qp *qp = NULL;
432 	struct ibv_exp_rwq_ind_table_init_attr ind_init_attr;
433 	struct ibv_exp_rx_hash_conf hash_conf;
434 	struct ibv_exp_qp_init_attr qp_init_attr;
437 	/* Return immediately if it has already been created. */
438 	if (fdir_queue->qp != NULL)
/* Single-entry indirection table pointing at this RX queue's WQ
 * (log_ind_tbl_size == 0 -> table size 1). */
441 	ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){
443 		.log_ind_tbl_size = 0,
444 		.ind_tbl = &((*priv->rxqs)[idx]->wq),
449 	ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
451 	if (ind_table == NULL) {
452 		/* Not clear whether errno is set. */
453 		err = (errno ? errno : EINVAL);
454 		ERROR("RX indirection table creation failed with error %d: %s",
459 	/* Create fdir_queue qp. */
/* RSS hash fields mask 0: no spreading, every matched packet goes to
 * the single queue in the indirection table. */
460 	hash_conf = (struct ibv_exp_rx_hash_conf){
461 		.rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
462 		.rx_hash_key_len = rss_hash_default_key_len,
463 		.rx_hash_key = rss_hash_default_key,
464 		.rx_hash_fields_mask = 0,
465 		.rwq_ind_tbl = ind_table,
467 	qp_init_attr = (struct ibv_exp_qp_init_attr){
468 		.max_inl_recv = 0, /* Currently not supported. */
469 		.qp_type = IBV_QPT_RAW_PACKET,
470 		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
471 			      IBV_EXP_QP_INIT_ATTR_RX_HASH),
473 		.rx_hash_conf = &hash_conf,
474 		.port_num = priv->port,
477 	qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr);
479 		err = (errno ? errno : EINVAL);
480 		ERROR("hash RX QP creation failure: %s", strerror(err));
/* Success: cache both objects for reuse and later teardown. */
484 	fdir_queue->ind_table = ind_table;
/* Error path: undo partial construction. */
491 	claim_zero(ibv_destroy_qp(qp));
493 	if (ind_table != NULL)
494 		claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
500  * Enable flow director filter and create steering rules.
504  * @param mlx5_fdir_filter
505  *   Filter to create steering rule for.
508  *   0 on success, errno value on failure.
/* NOTE(review): truncated extraction — braces and early "return 0;"-style
 * exits are missing from view. */
511 priv_fdir_filter_enable(struct priv *priv,
512 			struct mlx5_fdir_filter *mlx5_fdir_filter)
514 	struct fdir_queue *fdir_queue;
516 	/* Check if flow already exists. */
/* Non-NULL flow handle means the rule is already installed; nothing
 * to do in that case. */
517 	if (mlx5_fdir_filter->flow != NULL)
520 	/* Get fdir_queue for specific queue. */
/* Lazily creates the QP/indirection table on first use. */
521 	fdir_queue = priv_get_fdir_queue(priv, mlx5_fdir_filter->queue);
523 	if (fdir_queue == NULL) {
524 		ERROR("failed to create flow director rxq for queue %d",
525 		      mlx5_fdir_filter->queue);
/* Delegate the actual steering-rule creation. */
530 	return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
534  * Initialize flow director filters list.
540  *   0 on success, errno value on failure.
/* NOTE(review): truncated extraction — braces, returns and the errno
 * capture before the ERROR() call are missing from view. */
543 fdir_init_filters_list(struct priv *priv)
545 	/* Filter list initialization should be done only once. */
546 	if (priv->fdir_filter_list)
549 	/* Create filters list. */
/* rte_calloc zero-fills, so the list head starts out empty-compatible. */
550 	priv->fdir_filter_list =
551 		rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);
553 	if (priv->fdir_filter_list == NULL) {
556 		ERROR("cannot allocate flow director filter list: %s",
561 	LIST_INIT(priv->fdir_filter_list);
/* Remove every flow director filter from the list, destroying the
 * associated verbs flow handles and freeing each element.
 * NOTE(review): truncated extraction — the doc comment, return type,
 * braces and the guard presumably preceding ibv_exp_destroy_flow()
 * (original line 583 is missing) are not visible; verify upstream. */
573 priv_fdir_filter_flush(struct priv *priv)
575 	struct mlx5_fdir_filter *mlx5_fdir_filter;
/* Pop entries until the list is empty. */
577 	while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
578 		struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
580 		DEBUG("%p: flushing flow director filter %p",
581 		      (void *)priv, (void *)mlx5_fdir_filter);
582 		LIST_REMOVE(mlx5_fdir_filter, next);
584 			claim_zero(ibv_exp_destroy_flow(flow));
585 		rte_free(mlx5_fdir_filter);
590  * Remove all flow director filters and delete list.
/* Flushes all filters, then releases the list head itself and resets the
 * pointer so fdir_init_filters_list() can recreate it later. */
596 priv_fdir_delete_filters_list(struct priv *priv)
598 	priv_fdir_filter_flush(priv);
599 	rte_free(priv->fdir_filter_list);
600 	priv->fdir_filter_list = NULL;
604  * Disable flow director, remove all steering rules.
/* Destroys every installed steering rule (keeping the filters in the list
 * so priv_fdir_enable() can re-install them), then tears down the per-RX
 * queue flow-director QPs and indirection tables.
 * NOTE(review): truncated extraction — braces, the declaration of "i" and
 * the "if (flow ...)" guard before destroy (original line 625) are missing
 * from view. */
610 priv_fdir_disable(struct priv *priv)
613 	struct mlx5_fdir_filter *mlx5_fdir_filter;
614 	struct fdir_queue *fdir_queue;
616 	/* Run on every flow director filter and destroy flow handle. */
617 	LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
618 		struct ibv_exp_flow *flow;
620 		/* Only valid elements should be in the list */
621 		assert(mlx5_fdir_filter != NULL);
622 		flow = mlx5_fdir_filter->flow;
624 		/* Destroy flow handle */
626 			claim_zero(ibv_exp_destroy_flow(flow));
/* NULL marks the rule as uninstalled; the filter stays listed. */
627 			mlx5_fdir_filter->flow = NULL;
631 	/* Run on every RX queue to destroy related flow director QP and
632 	 * indirection table. */
633 	for (i = 0; (i != priv->rxqs_n); i++) {
634 		struct rxq_ctrl *rxq_ctrl =
635 			container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
637 		fdir_queue = &rxq_ctrl->fdir_queue;
638 		if (fdir_queue->qp != NULL) {
639 			claim_zero(ibv_destroy_qp(fdir_queue->qp));
640 			fdir_queue->qp = NULL;
643 		if (fdir_queue->ind_table != NULL) {
644 			claim_zero(ibv_exp_destroy_rwq_ind_table
645 				   (fdir_queue->ind_table));
646 			fdir_queue->ind_table = NULL;
652  * Enable flow director, create steering rules.
/* Re-installs a steering rule for every filter in the list (counterpart of
 * priv_fdir_disable()). Per-filter enable errors are not propagated here
 * as far as the visible lines show — TODO confirm against full source. */
658 priv_fdir_enable(struct priv *priv)
660 	struct mlx5_fdir_filter *mlx5_fdir_filter;
662 	/* Run on every fdir filter and create flow handle */
663 	LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
664 		/* Only valid elements should be in the list */
665 		assert(mlx5_fdir_filter != NULL);
667 		priv_fdir_filter_enable(priv, mlx5_fdir_filter);
672  * Find specific filter in list.
677  *   Flow director filter to find.
680  *   Filter element if found, otherwise NULL.
/* Matching relies on bytewise memcmp of fdir_flow_desc, which is safe only
 * because fdir_filter_to_flow_desc() memsets the descriptor first (no
 * uninitialized padding). */
682 static struct mlx5_fdir_filter *
683 priv_find_filter_in_list(struct priv *priv,
684 			 const struct rte_eth_fdir_filter *fdir_filter)
686 	struct fdir_flow_desc desc;
687 	struct mlx5_fdir_filter *mlx5_fdir_filter;
688 	enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
690 	/* Get flow director filter to look for. */
691 	fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);
693 	/* Look for the requested element. */
694 	LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
695 		/* Only valid elements should be in the list. */
696 		assert(mlx5_fdir_filter != NULL);
698 		/* Return matching filter. */
699 		if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
700 			return mlx5_fdir_filter;
703 	/* Filter not found */
708  * Add new flow director filter and store it in list.
713  *   Flow director filter to add.
716  *   0 on success, errno value on failure.
/* NOTE(review): truncated extraction — braces, "return EINVAL/EEXIST"
 * exits, the assignment target on original line 740, the errno capture in
 * the allocation-failure path, the "if started" guard before enable and
 * the final return are missing from view. */
719 priv_fdir_filter_add(struct priv *priv,
720 		     const struct rte_eth_fdir_filter *fdir_filter)
722 	struct mlx5_fdir_filter *mlx5_fdir_filter;
723 	enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
726 	/* Validate queue number. */
727 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
728 		ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
732 	/* Duplicate filters are currently unsupported. */
733 	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
734 	if (mlx5_fdir_filter != NULL) {
735 		ERROR("filter already exists");
739 	/* Create new flow director filter. */
741 		rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
742 	if (mlx5_fdir_filter == NULL) {
744 		ERROR("cannot allocate flow director filter: %s",
/* Record the target RX queue from the filter's action. */
750 	mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
752 	/* Convert to mlx5 filter descriptor. */
753 	fdir_filter_to_flow_desc(fdir_filter,
754 				 &mlx5_fdir_filter->desc, fdir_mode);
756 	/* Insert new filter into list. */
757 	LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);
759 	DEBUG("%p: flow director filter %p added",
760 	      (void *)priv, (void *)mlx5_fdir_filter);
762 	/* Enable filter immediately if device is started. */
764 		err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
770  * Update queue for specific filter.
775  *   Filter to be updated.
778  *   0 on success, errno value on failure.
/* Updating means: destroy the old steering rule, change the target queue,
 * and re-install if the device is started. A filter that does not exist
 * yet is created instead (see tail of the function).
 * NOTE(review): truncated extraction — braces, returns, the "if (flow)"
 * guard before destroy and the "if started" guard before enable are
 * missing from view. */
781 priv_fdir_filter_update(struct priv *priv,
782 			const struct rte_eth_fdir_filter *fdir_filter)
784 	struct mlx5_fdir_filter *mlx5_fdir_filter;
786 	/* Validate queue number. */
787 	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
788 		ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
792 	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
793 	if (mlx5_fdir_filter != NULL) {
794 		struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
797 		/* Update queue number. */
798 		mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
800 		/* Destroy flow handle. */
802 			claim_zero(ibv_exp_destroy_flow(flow));
803 			mlx5_fdir_filter->flow = NULL;
805 		DEBUG("%p: flow director filter %p updated",
806 		      (void *)priv, (void *)mlx5_fdir_filter);
808 		/* Enable filter if device is started. */
810 			err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
815 	/* Filter not found, create it. */
816 	DEBUG("%p: filter not found for update, creating new filter",
818 	return priv_fdir_filter_add(priv, fdir_filter);
822  * Delete specific filter.
827  *   Filter to be deleted.
830  *   0 on success, errno value on failure.
/* NOTE(review): truncated extraction — braces, the "if (flow)" guard
 * before destroy, the "return 0;" on the success path and the not-found
 * return value (presumably ENOENT/EINVAL) are missing from view. */
833 priv_fdir_filter_delete(struct priv *priv,
834 			const struct rte_eth_fdir_filter *fdir_filter)
836 	struct mlx5_fdir_filter *mlx5_fdir_filter;
838 	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
839 	if (mlx5_fdir_filter != NULL) {
840 		struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
842 		/* Remove element from list. */
843 		LIST_REMOVE(mlx5_fdir_filter, next);
845 		/* Destroy flow handle. */
847 			claim_zero(ibv_exp_destroy_flow(flow));
848 			mlx5_fdir_filter->flow = NULL;
851 		DEBUG("%p: flow director filter %p deleted",
852 		      (void *)priv, (void *)mlx5_fdir_filter);
/* Element was removed from the list above; release its memory. */
855 		rte_free(mlx5_fdir_filter);
860 	ERROR("%p: flow director delete failed, cannot find filter",
866  * Get flow director information.
870  * @param[out] fdir_info
871  *   Resulting flow director information.
/* Reports mode and configured masks; all flexible-payload capabilities are
 * zeroed because this PMD does not support them. */
874 priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
876 	struct rte_eth_fdir_masks *mask =
877 		&priv->dev->data->dev_conf.fdir_conf.mask;
879 	fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
880 	fdir_info->guarant_spc = 0;
882 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
884 	fdir_info->max_flexpayload = 0;
885 	fdir_info->flow_types_mask[0] = 0;
887 	fdir_info->flex_payload_unit = 0;
888 	fdir_info->max_flex_payload_segment_num = 0;
889 	fdir_info->flex_payload_limit = 0;
890 	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
894  * Deal with flow director operations.
897  *   Pointer to private structure.
899  *   Operation to perform.
901  *   Pointer to operation-specific structure.
904  *   0 on success, errno value on failure.
/* Dispatch table for RTE_ETH_FILTER_FDIR sub-operations. Only the two
 * "perfect" modes are supported; signature-mode requests are rejected.
 * NOTE(review): truncated extraction — braces, "break;" statements between
 * cases, the switch keyword's line, returns and the "ret" declaration are
 * missing from view. */
907 priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
909 	enum rte_fdir_mode fdir_mode =
910 		priv->dev->data->dev_conf.fdir_conf.mode;
/* NOP is always accepted, even in unsupported modes. */
913 	if (filter_op == RTE_ETH_FILTER_NOP)
916 	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
917 	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
918 		ERROR("%p: flow director mode %d not supported",
919 		      (void *)priv, fdir_mode);
924 	case RTE_ETH_FILTER_ADD:
925 		ret = priv_fdir_filter_add(priv, arg);
927 	case RTE_ETH_FILTER_UPDATE:
928 		ret = priv_fdir_filter_update(priv, arg);
930 	case RTE_ETH_FILTER_DELETE:
931 		ret = priv_fdir_filter_delete(priv, arg);
933 	case RTE_ETH_FILTER_FLUSH:
934 		priv_fdir_filter_flush(priv);
936 	case RTE_ETH_FILTER_INFO:
937 		priv_fdir_info_get(priv, arg);
940 		DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
948 * Manage filter operations.
951 * Pointer to Ethernet device structure.
955 * Operation to perform.
957 * Pointer to operation-specific structure.
960 * 0 on success, negative errno value on failure.
963 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
964 enum rte_filter_type filter_type,
965 enum rte_filter_op filter_op,
969 struct priv *priv = dev->data->dev_private;
971 switch (filter_type) {
972 case RTE_ETH_FILTER_FDIR:
974 ret = priv_fdir_ctrl_func(priv, filter_op, arg);
978 ERROR("%p: filter type (%d) not supported",
979 (void *)dev, filter_type);