4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
43 #pragma GCC diagnostic ignored "-Wpedantic"
45 #include <infiniband/verbs.h>
47 #pragma GCC diagnostic error "-Wpedantic"
50 #include <rte_ether.h>
51 #include <rte_malloc.h>
52 #include <rte_ethdev.h>
53 #include <rte_common.h>
55 #include <rte_flow_driver.h>
58 #include "mlx5_rxtx.h"
/*
 * Canonical mlx5 representation of one flow director rule: the packet
 * fields a rule matches on, normalized from struct rte_eth_fdir_filter.
 * NOTE(review): listing is elided here (line numbers jump 60->67); the
 * port/IP/MAC/VLAN fields referenced by the code below are not visible.
 */
60 struct fdir_flow_desc {
67 enum hash_rxq_type type; /* Hash RX queue family this rule binds to. */
/*
 * One installed (or pending) flow director filter, linked into the
 * per-device fdir_filter_list.
 */
70 struct mlx5_fdir_filter {
71 LIST_ENTRY(mlx5_fdir_filter) next;
72 uint16_t queue; /* Queue assigned to if FDIR match. */
73 enum rte_eth_fdir_behavior behavior; /* ACCEPT (steer) or REJECT (drop). */
74 struct fdir_flow_desc desc; /* Normalized match criteria. */
75 struct ibv_flow *flow; /* Verbs flow handle; NULL while not enabled. */
/* Head type for the device's list of flow director filters. */
78 LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
81 * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
83 * @param[in] fdir_filter
84 * DPDK filter structure to convert.
86 * Resulting mlx5 filter descriptor.
/*
 * Normalize a DPDK flow director filter into the driver's fdir_flow_desc:
 * pick the hash RX queue type from the flow type, then copy the relevant
 * L2/L3/L4 match fields. NOTE(review): listing elides several lines
 * (e.g. 114, 117, ...), presumably the switch "break" statements and
 * closing braces — confirm against the full source.
 */
91 fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
92 struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
94 /* Initialize descriptor. */
95 memset(desc, 0, sizeof(*desc));
98 desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
100 /* Set MAC address. */
101 if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
102 rte_memcpy(desc->mac,
103 fdir_filter->input.flow.mac_vlan_flow.mac_addr.
/* MAC+VLAN mode matches only on Ethernet headers. */
106 desc->type = HASH_RXQ_ETH;
/* Map the DPDK flow type onto a hash RX queue family. */
111 switch (fdir_filter->input.flow_type) {
112 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
113 desc->type = HASH_RXQ_UDPV4;
115 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
116 desc->type = HASH_RXQ_TCPV4;
118 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
119 desc->type = HASH_RXQ_IPV4;
121 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
122 desc->type = HASH_RXQ_UDPV6;
124 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
125 desc->type = HASH_RXQ_TCPV6;
127 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
128 desc->type = HASH_RXQ_IPV6;
134 /* Set flow values */
/* L4 cases fall through to also copy the L3 addresses below. */
135 switch (fdir_filter->input.flow_type) {
136 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
137 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
138 desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
139 desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
141 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
142 desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
143 desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
145 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
146 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
147 desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
148 desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
150 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
151 rte_memcpy(desc->src_ip,
152 fdir_filter->input.flow.ipv6_flow.src_ip,
153 sizeof(desc->src_ip));
154 rte_memcpy(desc->dst_ip,
155 fdir_filter->input.flow.ipv6_flow.dst_ip,
156 sizeof(desc->dst_ip));
164 * Check if two flow descriptors overlap according to configured mask.
167 * Private structure that provides flow director mask.
169 * First flow descriptor to compare.
171 * Second flow descriptor to compare.
174 * Nonzero if descriptors overlap.
/*
 * Decide whether two flow descriptors would match the same packets once
 * the device-wide fdir masks are applied: compares only the masked bits
 * of MAC, ports and IPs, and requires identical hash RX queue types.
 * NOTE(review): listing elides the early-return lines (186, 191, ...);
 * the visible conditions presumably guard "return 0/1" statements.
 */
177 priv_fdir_overlap(const struct priv *priv,
178 const struct fdir_flow_desc *desc1,
179 const struct fdir_flow_desc *desc2)
181 const struct rte_eth_fdir_masks *mask =
182 &priv->dev->data->dev_conf.fdir_conf.mask;
/* Different hash RX queue types can never overlap. */
185 if (desc1->type != desc2->type)
187 /* Ignore non masked bits. */
188 for (i = 0; i != RTE_DIM(desc1->mac); ++i)
189 if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
190 (desc2->mac[i] & mask->mac_addr_byte_mask))
192 if (((desc1->src_port & mask->src_port_mask) !=
193 (desc2->src_port & mask->src_port_mask)) ||
194 ((desc1->dst_port & mask->dst_port_mask) !=
195 (desc2->dst_port & mask->dst_port_mask)))
/* L3 comparison depends on address family. */
197 switch (desc1->type) {
201 if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
202 (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
203 ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
204 (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
/* IPv6: compare each 32-bit word under its own mask word. */
210 for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
211 if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
212 (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
213 ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
214 (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
224 * Create flow director steering rule for a specific filter.
228 * @param mlx5_fdir_filter
229 * Filter to create a steering rule for.
231 * Flow director queue for matching packets.
234 * 0 on success, errno value on failure.
/*
 * Build an ibv_flow_attr + spec chain (ETH, then optionally IPV4/IPV6,
 * then optionally TCP/UDP) from the filter descriptor and install it on
 * the flow director queue's QP via ibv_create_flow(). Rejects filters
 * that overlap an already-installed one to avoid packet duplication.
 * NOTE(review): listing elides error-return and "break" lines; the spec
 * layout logic below depends on exact statement order — do not reflow.
 */
237 priv_fdir_flow_add(struct priv *priv,
238 struct mlx5_fdir_filter *mlx5_fdir_filter,
239 struct fdir_queue *fdir_queue)
241 struct ibv_flow *flow;
242 struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
243 enum rte_fdir_mode fdir_mode =
244 priv->dev->data->dev_conf.fdir_conf.mode;
245 struct rte_eth_fdir_masks *mask =
246 &priv->dev->data->dev_conf.fdir_conf.mask;
/* Stack-allocated attr+spec buffer sized for this descriptor type. */
247 FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
248 struct ibv_flow_attr *attr = &data->attr;
249 uintptr_t spec_offset = (uintptr_t)&data->spec;
250 struct ibv_flow_spec_eth *spec_eth;
251 struct ibv_flow_spec_ipv4 *spec_ipv4;
252 struct ibv_flow_spec_ipv6 *spec_ipv6;
253 struct ibv_flow_spec_tcp_udp *spec_tcp_udp;
254 struct mlx5_fdir_filter *iter_fdir_filter;
257 /* Abort if an existing flow overlaps this one to avoid packet
258 * duplication, even if it targets another queue. */
259 LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
260 if ((iter_fdir_filter != mlx5_fdir_filter) &&
261 (iter_fdir_filter->flow != NULL) &&
262 (priv_fdir_overlap(priv,
263 &mlx5_fdir_filter->desc,
264 &iter_fdir_filter->desc)))
268 * No padding must be inserted by the compiler between attr and spec.
269 * This layout is expected by libibverbs.
271 assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);
272 priv_flow_attr(priv, attr, sizeof(data), desc->type);
274 /* Set Ethernet spec */
275 spec_eth = (struct ibv_flow_spec_eth *)spec_offset;
277 /* The first specification must be Ethernet. */
278 assert(spec_eth->type == IBV_FLOW_SPEC_ETH);
279 assert(spec_eth->size == sizeof(*spec_eth));
/* VLAN TCI is matched only on its masked bits. */
282 spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
283 spec_eth->mask.vlan_tag = mask->vlan_tci_mask;
285 /* Update priority */
/* MAC+VLAN mode additionally matches the destination MAC. */
288 if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
290 for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
291 spec_eth->val.dst_mac[i] =
292 desc->mac[i] & mask->mac_addr_byte_mask;
293 spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
/* Append the L3 spec right after the Ethernet spec. */
298 switch (desc->type) {
302 spec_offset += spec_eth->size;
305 spec_ipv4 = (struct ibv_flow_spec_ipv4 *)spec_offset;
307 /* The second specification must be IP. */
308 assert(spec_ipv4->type == IBV_FLOW_SPEC_IPV4);
309 assert(spec_ipv4->size == sizeof(*spec_ipv4));
311 spec_ipv4->val.src_ip =
312 desc->src_ip[0] & mask->ipv4_mask.src_ip;
313 spec_ipv4->val.dst_ip =
314 desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
315 spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
316 spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
318 /* Update priority */
/* Plain IPv4 rules have no L4 spec; stop here for them. */
321 if (desc->type == HASH_RXQ_IPV4)
324 spec_offset += spec_ipv4->size;
329 spec_offset += spec_eth->size;
332 spec_ipv6 = (struct ibv_flow_spec_ipv6 *)spec_offset;
334 /* The second specification must be IP. */
335 assert(spec_ipv6->type == IBV_FLOW_SPEC_IPV6);
336 assert(spec_ipv6->size == sizeof(*spec_ipv6));
338 for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
339 ((uint32_t *)spec_ipv6->val.src_ip)[i] =
340 desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
341 ((uint32_t *)spec_ipv6->val.dst_ip)[i] =
342 desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
344 rte_memcpy(spec_ipv6->mask.src_ip,
345 mask->ipv6_mask.src_ip,
346 sizeof(spec_ipv6->mask.src_ip));
347 rte_memcpy(spec_ipv6->mask.dst_ip,
348 mask->ipv6_mask.dst_ip,
349 sizeof(spec_ipv6->mask.dst_ip));
351 /* Update priority */
/* Plain IPv6 rules likewise carry no L4 spec. */
354 if (desc->type == HASH_RXQ_IPV6)
357 spec_offset += spec_ipv6->size;
360 ERROR("invalid flow attribute type");
364 /* Set TCP/UDP flow specification. */
365 spec_tcp_udp = (struct ibv_flow_spec_tcp_udp *)spec_offset;
367 /* The third specification must be TCP/UDP. */
368 assert(spec_tcp_udp->type == IBV_FLOW_SPEC_TCP ||
369 spec_tcp_udp->type == IBV_FLOW_SPEC_UDP);
370 assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));
372 spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
373 spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
374 spec_tcp_udp->mask.src_port = mask->src_port_mask;
375 spec_tcp_udp->mask.dst_port = mask->dst_port_mask;
377 /* Update priority */
/* Install the assembled rule on the target queue's QP. */
383 flow = ibv_create_flow(fdir_queue->qp, attr);
385 /* It's not clear whether errno is always set in this case. */
386 ERROR("%p: flow director configuration failed, errno=%d: %s",
388 (errno ? strerror(errno) : "Unknown error"));
394 DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
395 mlx5_fdir_filter->flow = flow;
400 * Destroy a flow director queue.
403 * Flow director queue to be destroyed.
/*
 * Tear down a flow director queue: first destroy every verbs flow still
 * attached to it (clearing the owning filters' handles), then release
 * its QP, indirection table, WQ and CQ, poison the struct and free it.
 */
406 priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
408 struct mlx5_fdir_filter *fdir_filter;
410 /* Disable filter flows still applying to this queue. */
411 LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
412 unsigned int idx = fdir_filter->queue;
413 struct rxq_ctrl *rxq_ctrl =
414 container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
416 assert(idx < priv->rxqs_n);
417 if (fdir_queue == rxq_ctrl->fdir_queue &&
418 fdir_filter->flow != NULL) {
419 claim_zero(ibv_destroy_flow(fdir_filter->flow));
/* Keep the filter; it can be re-enabled on a new queue later. */
420 fdir_filter->flow = NULL;
/* Release verbs objects in reverse creation order. */
423 assert(fdir_queue->qp);
424 claim_zero(ibv_destroy_qp(fdir_queue->qp));
425 assert(fdir_queue->ind_table);
426 claim_zero(ibv_destroy_rwq_ind_table(fdir_queue->ind_table));
428 claim_zero(ibv_destroy_wq(fdir_queue->wq));
430 claim_zero(ibv_destroy_cq(fdir_queue->cq));
/* Poison freed memory to catch use-after-free in debug runs. */
432 memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
434 rte_free(fdir_queue);
438 * Create a flow director queue.
443 * Work queue to route matched packets to, NULL if one needs to
447 * Related flow director queue on success, NULL otherwise.
/*
 * Allocate a flow director queue and its verbs plumbing: a CQ, a WQ
 * (or reuse the caller-provided one for a regular RX queue), a
 * single-entry RWQ indirection table, and a RAW_PACKET hash RX QP with
 * a zero hash-fields mask (all matched packets go to the one WQ).
 * Returns NULL on any failure after unwinding partial state.
 * NOTE(review): listing elides error-path "goto"/labels and several
 * attribute fields; cleanup below is the tail error path.
 */
449 static struct fdir_queue *
450 priv_fdir_queue_create(struct priv *priv, struct ibv_wq *wq,
453 struct fdir_queue *fdir_queue;
455 fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
458 ERROR("cannot allocate flow director queue");
/* Minimal CQ: matched packets are dropped/steered, never polled here. */
464 fdir_queue->cq = ibv_create_cq(
465 priv->ctx, 1, NULL, NULL, 0);
466 if (!fdir_queue->cq) {
467 ERROR("cannot create flow director CQ");
470 fdir_queue->wq = ibv_create_wq(
472 &(struct ibv_wq_init_attr){
473 .wq_type = IBV_WQT_RQ,
477 .cq = fdir_queue->cq,
479 if (!fdir_queue->wq) {
480 ERROR("cannot create flow director WQ");
/* Single-entry table: log2(size) == 0. */
485 fdir_queue->ind_table = ibv_create_rwq_ind_table(
487 &(struct ibv_rwq_ind_table_init_attr){
488 .log_ind_tbl_size = 0,
492 if (!fdir_queue->ind_table) {
493 ERROR("cannot create flow director indirection table");
496 fdir_queue->qp = ibv_create_qp_ex(
498 &(struct ibv_qp_init_attr_ex){
499 .qp_type = IBV_QPT_RAW_PACKET,
501 IBV_QP_INIT_ATTR_PD |
502 IBV_QP_INIT_ATTR_IND_TABLE |
503 IBV_QP_INIT_ATTR_RX_HASH,
504 .rx_hash_conf = (struct ibv_rx_hash_conf){
506 IBV_RX_HASH_FUNC_TOEPLITZ,
507 .rx_hash_key_len = rss_hash_default_key_len,
508 .rx_hash_key = rss_hash_default_key,
/* No fields hashed: everything lands on the single WQ. */
509 .rx_hash_fields_mask = 0,
511 .rwq_ind_tbl = fdir_queue->ind_table,
514 if (!fdir_queue->qp) {
515 ERROR("cannot create flow director hash RX QP");
/* Error path: unwind whatever was created, in reverse order. */
521 assert(!fdir_queue->qp);
522 if (fdir_queue->ind_table)
523 claim_zero(ibv_destroy_rwq_ind_table
524 (fdir_queue->ind_table));
526 claim_zero(ibv_destroy_wq(fdir_queue->wq));
528 claim_zero(ibv_destroy_cq(fdir_queue->cq));
529 rte_free(fdir_queue);
534 * Get flow director queue for a specific RX queue, create it in case
543 * Related flow director queue on success, NULL otherwise.
/*
 * Return the flow director queue attached to RX queue `idx`, creating
 * it lazily (reusing the RX queue's WQ) and caching it on rxq_ctrl.
 */
545 static struct fdir_queue *
546 priv_get_fdir_queue(struct priv *priv, uint16_t idx)
548 struct rxq_ctrl *rxq_ctrl =
549 container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
550 struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;
552 assert(rxq_ctrl->wq);
553 if (fdir_queue == NULL) {
554 fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
/* Cache it; may still be NULL on creation failure. */
556 rxq_ctrl->fdir_queue = fdir_queue;
562 * Get or flow director drop queue. Create it if it does not exist.
568 * Flow director drop queue on success, NULL otherwise.
/*
 * Return the device-wide drop queue for REJECT filters, creating it
 * lazily (with a private WQ, hence the NULL wq argument) and caching
 * it in priv->fdir_drop_queue.
 */
570 static struct fdir_queue *
571 priv_get_fdir_drop_queue(struct priv *priv)
573 struct fdir_queue *fdir_queue = priv->fdir_drop_queue;
575 if (fdir_queue == NULL) {
576 unsigned int socket = SOCKET_ID_ANY;
578 /* Select a known NUMA socket if possible. */
579 if (priv->rxqs_n && (*priv->rxqs)[0])
580 socket = container_of((*priv->rxqs)[0],
581 struct rxq_ctrl, rxq)->socket;
582 fdir_queue = priv_fdir_queue_create(priv, NULL, socket);
/* Cache it; may still be NULL on creation failure. */
583 priv->fdir_drop_queue = fdir_queue;
589 * Enable flow director filter and create steering rules.
593 * @param mlx5_fdir_filter
594 * Filter to create steering rule for.
597 * 0 on success, errno value on failure.
/*
 * Install the steering rule for one filter: pick the drop queue for
 * REJECT behavior or the per-RX-queue fdir queue otherwise, then add
 * the verbs flow. No-op if the filter already has a flow handle.
 */
600 priv_fdir_filter_enable(struct priv *priv,
601 struct mlx5_fdir_filter *mlx5_fdir_filter)
603 struct fdir_queue *fdir_queue;
605 /* Check if flow already exists. */
606 if (mlx5_fdir_filter->flow != NULL)
609 /* Get fdir_queue for specific queue. */
610 if (mlx5_fdir_filter->behavior == RTE_ETH_FDIR_REJECT)
611 fdir_queue = priv_get_fdir_drop_queue(priv);
613 fdir_queue = priv_get_fdir_queue(priv,
614 mlx5_fdir_filter->queue);
616 if (fdir_queue == NULL) {
617 ERROR("failed to create flow director rxq for queue %d",
618 mlx5_fdir_filter->queue);
/* Create flow */
623 return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
627 * Initialize flow director filters list.
633 * 0 on success, errno value on failure.
/*
 * Allocate and initialize the device's flow director filter list.
 * Idempotent: returns early if the list already exists.
 */
636 fdir_init_filters_list(struct priv *priv)
638 /* Filter list initialization should be done only once. */
639 if (priv->fdir_filter_list)
642 /* Create filters list. */
643 priv->fdir_filter_list =
644 rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);
646 if (priv->fdir_filter_list == NULL) {
649 ERROR("cannot allocate flow director filter list: %s",
654 LIST_INIT(priv->fdir_filter_list);
/*
 * Remove every filter from the list, destroying its verbs flow (if
 * installed) and freeing the filter structure. The list itself stays
 * allocated and usable.
 */
666 priv_fdir_filter_flush(struct priv *priv)
668 struct mlx5_fdir_filter *mlx5_fdir_filter;
670 while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
671 struct ibv_flow *flow = mlx5_fdir_filter->flow;
673 DEBUG("%p: flushing flow director filter %p",
674 (void *)priv, (void *)mlx5_fdir_filter);
675 LIST_REMOVE(mlx5_fdir_filter, next);
/* Flow may be NULL when the filter was never enabled. */
677 claim_zero(ibv_destroy_flow(flow));
678 rte_free(mlx5_fdir_filter);
683 * Remove all flow director filters and delete list.
/* Flush all filters, then free the filter list itself. */
689 priv_fdir_delete_filters_list(struct priv *priv)
691 priv_fdir_filter_flush(priv);
692 rte_free(priv->fdir_filter_list);
693 priv->fdir_filter_list = NULL;
697 * Disable flow director, remove all steering rules.
/*
 * Disable flow director: destroy the verbs flow of every filter (the
 * filters themselves stay listed so they can be re-enabled), then tear
 * down each RX queue's fdir queue and the shared drop queue.
 */
703 priv_fdir_disable(struct priv *priv)
706 struct mlx5_fdir_filter *mlx5_fdir_filter;
708 /* Run on every flow director filter and destroy flow handle. */
709 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
710 struct ibv_flow *flow;
712 /* Only valid elements should be in the list */
713 assert(mlx5_fdir_filter != NULL);
714 flow = mlx5_fdir_filter->flow;
716 /* Destroy flow handle */
718 claim_zero(ibv_destroy_flow(flow));
719 mlx5_fdir_filter->flow = NULL;
723 /* Destroy flow director context in each RX queue. */
724 for (i = 0; (i != priv->rxqs_n); i++) {
725 struct rxq_ctrl *rxq_ctrl;
727 if (!(*priv->rxqs)[i])
729 rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
730 if (!rxq_ctrl->fdir_queue)
732 priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
733 rxq_ctrl->fdir_queue = NULL;
/* Also destroy the shared drop queue, if any. */
735 if (priv->fdir_drop_queue) {
736 priv_fdir_queue_destroy(priv, priv->fdir_drop_queue);
737 priv->fdir_drop_queue = NULL;
742 * Enable flow director, create steering rules.
/*
 * Enable flow director: (re)install a steering rule for every filter
 * in the list. Per-filter enable failures are not propagated here.
 */
748 priv_fdir_enable(struct priv *priv)
750 struct mlx5_fdir_filter *mlx5_fdir_filter;
752 /* Run on every fdir filter and create flow handle */
753 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
754 /* Only valid elements should be in the list */
755 assert(mlx5_fdir_filter != NULL);
757 priv_fdir_filter_enable(priv, mlx5_fdir_filter);
762 * Find specific filter in list.
767 * Flow director filter to find.
770 * Filter element if found, otherwise NULL.
/*
 * Locate a listed filter matching the given DPDK filter: the request is
 * normalized to a fdir_flow_desc and compared bytewise (memcmp) against
 * each stored descriptor. Returns NULL when not found. Relies on
 * fdir_filter_to_flow_desc() zeroing the descriptor first so padding
 * compares equal.
 */
772 static struct mlx5_fdir_filter *
773 priv_find_filter_in_list(struct priv *priv,
774 const struct rte_eth_fdir_filter *fdir_filter)
776 struct fdir_flow_desc desc;
777 struct mlx5_fdir_filter *mlx5_fdir_filter;
778 enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
780 /* Get flow director filter to look for. */
781 fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);
783 /* Look for the requested element. */
784 LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
785 /* Only valid elements should be in the list. */
786 assert(mlx5_fdir_filter != NULL);
788 /* Return matching filter. */
789 if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
790 return mlx5_fdir_filter;
793 /* Filter not found */
798 * Add new flow director filter and store it in list.
803 * Flow director filter to add.
806 * 0 on success, errno value on failure.
/*
 * Add a new flow director filter: validate the target queue index,
 * reject duplicates, allocate and populate the mlx5 filter, insert it
 * into the list, and enable it immediately when the device is started.
 */
809 priv_fdir_filter_add(struct priv *priv,
810 const struct rte_eth_fdir_filter *fdir_filter)
812 struct mlx5_fdir_filter *mlx5_fdir_filter;
813 enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
816 /* Validate queue number. */
817 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
818 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
822 /* Duplicate filters are currently unsupported. */
823 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
824 if (mlx5_fdir_filter != NULL) {
825 ERROR("filter already exists");
829 /* Create new flow director filter. */
831 rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
832 if (mlx5_fdir_filter == NULL) {
834 ERROR("cannot allocate flow director filter: %s",
839 /* Set action parameters. */
840 mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
841 mlx5_fdir_filter->behavior = fdir_filter->action.behavior;
843 /* Convert to mlx5 filter descriptor. */
844 fdir_filter_to_flow_desc(fdir_filter,
845 &mlx5_fdir_filter->desc, fdir_mode);
847 /* Insert new filter into list. */
848 LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);
850 DEBUG("%p: flow director filter %p added",
851 (void *)priv, (void *)mlx5_fdir_filter);
853 /* Enable filter immediately if device is started. */
855 err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
861 * Update queue for specific filter.
866 * Filter to be updated.
869 * 0 on success, errno value on failure.
/*
 * Update the target queue of an existing filter: destroy its current
 * verbs flow, change the queue, and re-enable when the device is
 * started. Falls back to priv_fdir_filter_add() when no matching
 * filter exists.
 */
872 priv_fdir_filter_update(struct priv *priv,
873 const struct rte_eth_fdir_filter *fdir_filter)
875 struct mlx5_fdir_filter *mlx5_fdir_filter;
877 /* Validate queue number. */
878 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
879 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
883 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
884 if (mlx5_fdir_filter != NULL) {
885 struct ibv_flow *flow = mlx5_fdir_filter->flow;
888 /* Update queue number. */
889 mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
891 /* Destroy flow handle. */
893 claim_zero(ibv_destroy_flow(flow));
894 mlx5_fdir_filter->flow = NULL;
896 DEBUG("%p: flow director filter %p updated",
897 (void *)priv, (void *)mlx5_fdir_filter);
899 /* Enable filter if device is started. */
901 err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
906 /* Filter not found, create it. */
907 DEBUG("%p: filter not found for update, creating new filter",
909 return priv_fdir_filter_add(priv, fdir_filter);
913 * Delete specific filter.
918 * Filter to be deleted.
921 * 0 on success, errno value on failure.
/*
 * Delete a filter: unlink it from the list, destroy its verbs flow
 * (if installed) and free it. Reports an error when the filter is
 * not found.
 */
924 priv_fdir_filter_delete(struct priv *priv,
925 const struct rte_eth_fdir_filter *fdir_filter)
927 struct mlx5_fdir_filter *mlx5_fdir_filter;
929 mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
930 if (mlx5_fdir_filter != NULL) {
931 struct ibv_flow *flow = mlx5_fdir_filter->flow;
933 /* Remove element from list. */
934 LIST_REMOVE(mlx5_fdir_filter, next);
936 /* Destroy flow handle. */
938 claim_zero(ibv_destroy_flow(flow));
939 mlx5_fdir_filter->flow = NULL;
942 DEBUG("%p: flow director filter %p deleted",
943 (void *)priv, (void *)mlx5_fdir_filter);
946 rte_free(mlx5_fdir_filter);
951 ERROR("%p: flow director delete failed, cannot find filter",
957 * Get flow director information.
961 * @param[out] fdir_info
962 * Resulting flow director information.
/*
 * Fill rte_eth_fdir_info: report the configured mode and masks, and
 * zero every capability field this PMD does not support (flex payload,
 * guaranteed space, flow-type mask).
 */
965 priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
967 struct rte_eth_fdir_masks *mask =
968 &priv->dev->data->dev_conf.fdir_conf.mask;
970 fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
971 fdir_info->guarant_spc = 0;
973 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
975 fdir_info->max_flexpayload = 0;
976 fdir_info->flow_types_mask[0] = 0;
978 fdir_info->flex_payload_unit = 0;
979 fdir_info->max_flex_payload_segment_num = 0;
980 fdir_info->flex_payload_limit = 0;
981 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
985 * Deal with flow director operations.
988 * Pointer to private structure.
990 * Operation to perform.
992 * Pointer to operation-specific structure.
995 * 0 on success, errno value on failure.
/*
 * Dispatch a flow director control operation (ADD/UPDATE/DELETE/FLUSH/
 * INFO). Only PERFECT and PERFECT_MAC_VLAN modes are supported; other
 * modes and unknown operations are rejected with an error.
 * NOTE(review): listing elides "break" lines between the cases.
 */
998 priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
1000 enum rte_fdir_mode fdir_mode =
1001 priv->dev->data->dev_conf.fdir_conf.mode;
1004 if (filter_op == RTE_ETH_FILTER_NOP)
1007 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
1008 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1009 ERROR("%p: flow director mode %d not supported",
1010 (void *)priv, fdir_mode);
1014 switch (filter_op) {
1015 case RTE_ETH_FILTER_ADD:
1016 ret = priv_fdir_filter_add(priv, arg);
1018 case RTE_ETH_FILTER_UPDATE:
1019 ret = priv_fdir_filter_update(priv, arg);
1021 case RTE_ETH_FILTER_DELETE:
1022 ret = priv_fdir_filter_delete(priv, arg);
1024 case RTE_ETH_FILTER_FLUSH:
1025 priv_fdir_filter_flush(priv);
1027 case RTE_ETH_FILTER_INFO:
1028 priv_fdir_info_get(priv, arg);
1031 DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
/* rte_flow (generic filter) callbacks exposed by this PMD. */
1038 static const struct rte_flow_ops mlx5_flow_ops = {
1039 .validate = mlx5_flow_validate,
1040 .create = mlx5_flow_create,
1041 .destroy = mlx5_flow_destroy,
1042 .flush = mlx5_flow_flush,
1044 .isolate = mlx5_flow_isolate,
1048 * Manage filter operations.
1051 * Pointer to Ethernet device structure.
1052 * @param filter_type
1055 * Operation to perform.
1057 * Pointer to operation-specific structure.
1060 * 0 on success, negative errno value on failure.
/*
 * Top-level eth_dev filter_ctrl callback: GENERIC returns the rte_flow
 * ops table (GET only), FDIR is delegated to priv_fdir_ctrl_func(),
 * anything else is rejected. Refuses to run in a secondary process.
 */
1063 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1064 enum rte_filter_type filter_type,
1065 enum rte_filter_op filter_op,
1069 struct priv *priv = dev->data->dev_private;
1071 if (mlx5_is_secondary())
1072 return -E_RTE_SECONDARY;
1073 switch (filter_type) {
1074 case RTE_ETH_FILTER_GENERIC:
1075 if (filter_op != RTE_ETH_FILTER_GET)
1077 *(const void **)arg = &mlx5_flow_ops;
1079 case RTE_ETH_FILTER_FDIR:
1081 ret = priv_fdir_ctrl_func(priv, filter_op, arg);
1085 ERROR("%p: filter type (%d) not supported",
1086 (void *)dev, filter_type);