net/mlx5: split Rx queue structure
dpdk.git: drivers/net/mlx5/mlx5_fdir.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"

struct fdir_flow_desc {
        uint16_t dst_port;
        uint16_t src_port;
        uint32_t src_ip[4];
        uint32_t dst_ip[4];
        uint8_t mac[6];
        uint16_t vlan_tag;
        enum hash_rxq_type type;
};

struct mlx5_fdir_filter {
        LIST_ENTRY(mlx5_fdir_filter) next;
        uint16_t queue; /* RX queue assigned when the filter matches. */
        struct fdir_flow_desc desc;
        struct ibv_exp_flow *flow;
};
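
/*
 * Note: "flow" is the verbs handle of the rule as currently programmed in
 * hardware. It is NULL whenever the rule is not installed (e.g. while the
 * device is stopped); priv_fdir_filter_enable() (re)creates it and
 * priv_fdir_disable() destroys it while keeping the filter in the list.
 */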

LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);

/**
 * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
 *
 * @param[in] fdir_filter
 *   DPDK filter structure to convert.
 * @param[out] desc
 *   Resulting mlx5 filter descriptor.
 * @param mode
 *   Flow director mode.
 */
static void
fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
                         struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
{
        /* Initialize descriptor. */
        memset(desc, 0, sizeof(*desc));

        /* Set VLAN ID. */
        desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;

        /* Set MAC address. */
        if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                rte_memcpy(desc->mac,
                           fdir_filter->input.flow.mac_vlan_flow.mac_addr.
                                addr_bytes,
                           sizeof(desc->mac));
                desc->type = HASH_RXQ_ETH;
                return;
        }

        /* Set mode. */
        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                desc->type = HASH_RXQ_UDPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                desc->type = HASH_RXQ_TCPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                desc->type = HASH_RXQ_IPV4;
                break;
#ifdef HAVE_FLOW_SPEC_IPV6
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                desc->type = HASH_RXQ_UDPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
                desc->type = HASH_RXQ_TCPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
                desc->type = HASH_RXQ_IPV6;
                break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
        default:
                break;
        }

        /* Set flow values. */
        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
                desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
                /* Fall through. */
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
                desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
                break;
#ifdef HAVE_FLOW_SPEC_IPV6
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
                desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
                desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
                /* Fall through. */
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
                rte_memcpy(desc->src_ip,
                           fdir_filter->input.flow.ipv6_flow.src_ip,
                           sizeof(desc->src_ip));
                rte_memcpy(desc->dst_ip,
                           fdir_filter->input.flow.ipv6_flow.dst_ip,
                           sizeof(desc->dst_ip));
                break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
        default:
                break;
        }
}
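
/*
 * Because the whole descriptor is zeroed first and later compared with
 * memcmp() in priv_find_filter_in_list(), fields unused by a given flow
 * type are guaranteed to be zero and cannot cause spurious mismatches.
 */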

/**
 * Check if two flow descriptors overlap according to configured mask.
 *
 * @param priv
 *   Private structure that provides flow director mask.
 * @param desc1
 *   First flow descriptor to compare.
 * @param desc2
 *   Second flow descriptor to compare.
 *
 * @return
 *   Nonzero if descriptors overlap.
 */
static int
priv_fdir_overlap(const struct priv *priv,
                  const struct fdir_flow_desc *desc1,
                  const struct fdir_flow_desc *desc2)
{
        const struct rte_eth_fdir_masks *mask =
                &priv->dev->data->dev_conf.fdir_conf.mask;
        unsigned int i;

        if (desc1->type != desc2->type)
                return 0;
        /* Ignore bits outside the configured mask. */
        for (i = 0; i != RTE_DIM(desc1->mac); ++i)
                if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
                    (desc2->mac[i] & mask->mac_addr_byte_mask))
                        return 0;
        if (((desc1->src_port & mask->src_port_mask) !=
             (desc2->src_port & mask->src_port_mask)) ||
            ((desc1->dst_port & mask->dst_port_mask) !=
             (desc2->dst_port & mask->dst_port_mask)))
                return 0;
        switch (desc1->type) {
        case HASH_RXQ_IPV4:
        case HASH_RXQ_UDPV4:
        case HASH_RXQ_TCPV4:
                if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
                     (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
                    ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
                     (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
                        return 0;
                break;
#ifdef HAVE_FLOW_SPEC_IPV6
        case HASH_RXQ_IPV6:
        case HASH_RXQ_UDPV6:
        case HASH_RXQ_TCPV6:
                for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
                        if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
                             (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
                            ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
                             (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
                                return 0;
                break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
        default:
                break;
        }
        return 1;
}
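
/*
 * Example: if the configured IPv4 source mask leaves the last octet
 * unmasked, filters for 10.0.0.1 and 10.0.0.2 overlap, and
 * priv_fdir_flow_add() below rejects the second one with EEXIST.
 */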

/**
 * Create flow director steering rule for a specific filter.
 *
 * @param priv
 *   Private structure.
 * @param mlx5_fdir_filter
 *   Filter to create a steering rule for.
 * @param fdir_queue
 *   Flow director queue for matching packets.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_flow_add(struct priv *priv,
                   struct mlx5_fdir_filter *mlx5_fdir_filter,
                   struct fdir_queue *fdir_queue)
{
        struct ibv_exp_flow *flow;
        struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
        enum rte_fdir_mode fdir_mode =
                priv->dev->data->dev_conf.fdir_conf.mode;
        struct rte_eth_fdir_masks *mask =
                &priv->dev->data->dev_conf.fdir_conf.mask;
        FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
        struct ibv_exp_flow_attr *attr = &data->attr;
        uintptr_t spec_offset = (uintptr_t)&data->spec;
        struct ibv_exp_flow_spec_eth *spec_eth;
        struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
#ifdef HAVE_FLOW_SPEC_IPV6
        struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
#endif /* HAVE_FLOW_SPEC_IPV6 */
        struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
        struct mlx5_fdir_filter *iter_fdir_filter;
        unsigned int i;

        /* Abort if an existing flow overlaps this one to avoid packet
         * duplication, even if it targets another queue. */
        LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
                if ((iter_fdir_filter != mlx5_fdir_filter) &&
                    (iter_fdir_filter->flow != NULL) &&
                    (priv_fdir_overlap(priv,
                                       &mlx5_fdir_filter->desc,
                                       &iter_fdir_filter->desc)))
                        return EEXIST;

        /*
         * No padding must be inserted by the compiler between attr and spec.
         * This layout is expected by libibverbs.
         */
        assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);
        priv_flow_attr(priv, attr, sizeof(data), desc->type);

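        /*
         * priv_flow_attr() has filled the buffer with a template: an
         * Ethernet spec optionally followed by IP and TCP/UDP specs
         * depending on desc->type. spec_offset walks this buffer as each
         * specification is refined below.
         */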
        /* Set Ethernet spec */
        spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;

        /* The first specification must be Ethernet. */
        assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
        assert(spec_eth->size == sizeof(*spec_eth));

        /* VLAN ID */
        spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
        spec_eth->mask.vlan_tag = mask->vlan_tci_mask;

        /* Update priority. The more specific the rule, the lower the
         * numeric priority assigned: L2-only rules use 2, L3 rules 1 and
         * L4 rules 0 (see below). */
        attr->priority = 2;

        if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                /* MAC Address */
                for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
                        spec_eth->val.dst_mac[i] =
                                desc->mac[i] & mask->mac_addr_byte_mask;
                        spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
                }
                goto create_flow;
        }

        switch (desc->type) {
        case HASH_RXQ_IPV4:
        case HASH_RXQ_UDPV4:
        case HASH_RXQ_TCPV4:
                spec_offset += spec_eth->size;

                /* Set IP spec */
                spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;

                /* The second specification must be IP. */
                assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
                assert(spec_ipv4->size == sizeof(*spec_ipv4));

                spec_ipv4->val.src_ip =
                        desc->src_ip[0] & mask->ipv4_mask.src_ip;
                spec_ipv4->val.dst_ip =
                        desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
                spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
                spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;

                /* Update priority */
                attr->priority = 1;

                if (desc->type == HASH_RXQ_IPV4)
                        goto create_flow;

                spec_offset += spec_ipv4->size;
                break;
#ifdef HAVE_FLOW_SPEC_IPV6
        case HASH_RXQ_IPV6:
        case HASH_RXQ_UDPV6:
        case HASH_RXQ_TCPV6:
                spec_offset += spec_eth->size;

                /* Set IP spec */
                spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;

                /* The second specification must be IP. */
                assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
                assert(spec_ipv6->size == sizeof(*spec_ipv6));

                for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
                        ((uint32_t *)spec_ipv6->val.src_ip)[i] =
                                desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
                        ((uint32_t *)spec_ipv6->val.dst_ip)[i] =
                                desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
                }
                rte_memcpy(spec_ipv6->mask.src_ip,
                           mask->ipv6_mask.src_ip,
                           sizeof(spec_ipv6->mask.src_ip));
                rte_memcpy(spec_ipv6->mask.dst_ip,
                           mask->ipv6_mask.dst_ip,
                           sizeof(spec_ipv6->mask.dst_ip));

                /* Update priority */
                attr->priority = 1;

                if (desc->type == HASH_RXQ_IPV6)
                        goto create_flow;

                spec_offset += spec_ipv6->size;
                break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
        default:
                ERROR("invalid flow attribute type");
                return EINVAL;
        }

        /* Set TCP/UDP flow specification. */
        spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;

        /* The third specification must be TCP/UDP. */
        assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
               spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
        assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));

        spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
        spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
        spec_tcp_udp->mask.src_port = mask->src_port_mask;
        spec_tcp_udp->mask.dst_port = mask->dst_port_mask;

        /* Update priority */
        attr->priority = 0;

create_flow:

        errno = 0;
        flow = ibv_exp_create_flow(fdir_queue->qp, attr);
        if (flow == NULL) {
                /* It's not clear whether errno is always set in this case. */
                ERROR("%p: flow director configuration failed, errno=%d: %s",
                      (void *)priv, errno,
                      (errno ? strerror(errno) : "Unknown error"));
                if (errno)
                        return errno;
                return EINVAL;
        }

        DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
        mlx5_fdir_filter->flow = flow;
        return 0;
}

/**
 * Get flow director queue for a specific RX queue, create it in case
 * it does not exist.
 *
 * @param priv
 *   Private structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   Related flow director queue on success, NULL otherwise.
 */
static struct fdir_queue *
priv_get_fdir_queue(struct priv *priv, uint16_t idx)
{
        struct rxq_ctrl *rxq_ctrl =
                container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
        struct fdir_queue *fdir_queue = &rxq_ctrl->fdir_queue;
        struct ibv_exp_rwq_ind_table *ind_table = NULL;
        struct ibv_qp *qp = NULL;
        struct ibv_exp_rwq_ind_table_init_attr ind_init_attr;
        struct ibv_exp_rx_hash_conf hash_conf;
        struct ibv_exp_qp_init_attr qp_init_attr;
        int err = 0;

        /* Return immediately if it has already been created. */
        if (fdir_queue->qp != NULL)
                return fdir_queue;

        ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){
                .pd = priv->pd,
                .log_ind_tbl_size = 0,
                .ind_tbl = &((*priv->rxqs)[idx]->wq),
                .comp_mask = 0,
        };
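        /*
         * Note: log_ind_tbl_size == 0 requests a single-entry (2^0)
         * indirection table pointing exclusively at this RX queue's work
         * queue, so every packet matched by a rule on the QP created below
         * is steered to that queue.
         */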

        errno = 0;
        ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
                                                 &ind_init_attr);
        if (ind_table == NULL) {
                /* Not clear whether errno is set. */
                err = (errno ? errno : EINVAL);
                ERROR("RX indirection table creation failed with error %d: %s",
                      err, strerror(err));
                goto error;
        }

        /* Create fdir_queue qp. */
        hash_conf = (struct ibv_exp_rx_hash_conf){
                .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
                .rx_hash_key_len = rss_hash_default_key_len,
                .rx_hash_key = rss_hash_default_key,
                .rx_hash_fields_mask = 0,
                .rwq_ind_tbl = ind_table,
        };
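        /*
         * rx_hash_fields_mask == 0 disables RSS hashing on this QP; with a
         * one-entry indirection table the hash result is irrelevant anyway.
         */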
        qp_init_attr = (struct ibv_exp_qp_init_attr){
                .max_inl_recv = 0, /* Currently not supported. */
                .qp_type = IBV_QPT_RAW_PACKET,
                .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
                              IBV_EXP_QP_INIT_ATTR_RX_HASH),
                .pd = priv->pd,
                .rx_hash_conf = &hash_conf,
                .port_num = priv->port,
        };

        qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr);
        if (qp == NULL) {
                err = (errno ? errno : EINVAL);
                ERROR("hash RX QP creation failure: %s", strerror(err));
                goto error;
        }

        fdir_queue->ind_table = ind_table;
        fdir_queue->qp = qp;

        return fdir_queue;

error:
        if (qp != NULL)
                claim_zero(ibv_destroy_qp(qp));

        if (ind_table != NULL)
                claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));

        return NULL;
}

/**
 * Enable flow director filter and create steering rules.
 *
 * @param priv
 *   Private structure.
 * @param mlx5_fdir_filter
 *   Filter to create steering rule for.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_enable(struct priv *priv,
                        struct mlx5_fdir_filter *mlx5_fdir_filter)
{
        struct fdir_queue *fdir_queue;

        /* Check if flow already exists. */
        if (mlx5_fdir_filter->flow != NULL)
                return 0;

        /* Get fdir_queue for specific queue. */
        fdir_queue = priv_get_fdir_queue(priv, mlx5_fdir_filter->queue);

        if (fdir_queue == NULL) {
                ERROR("failed to create flow director rxq for queue %d",
                      mlx5_fdir_filter->queue);
                return EINVAL;
        }

        /* Create flow */
        return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
}

/**
 * Initialize flow director filters list.
 *
 * @param priv
 *   Private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
fdir_init_filters_list(struct priv *priv)
{
        /* Filter list initialization should be done only once. */
        if (priv->fdir_filter_list)
                return 0;

        /* Create filters list. */
        priv->fdir_filter_list =
                rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);

        if (priv->fdir_filter_list == NULL) {
                int err = ENOMEM;

                ERROR("cannot allocate flow director filter list: %s",
                      strerror(err));
                return err;
        }

        LIST_INIT(priv->fdir_filter_list);

        return 0;
}

/**
 * Flush all filters.
 *
 * @param priv
 *   Private structure.
 */
static void
priv_fdir_filter_flush(struct priv *priv)
{
        struct mlx5_fdir_filter *mlx5_fdir_filter;

        while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
                struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;

                DEBUG("%p: flushing flow director filter %p",
                      (void *)priv, (void *)mlx5_fdir_filter);
                LIST_REMOVE(mlx5_fdir_filter, next);
                if (flow != NULL)
                        claim_zero(ibv_exp_destroy_flow(flow));
                rte_free(mlx5_fdir_filter);
        }
}

/**
 * Remove all flow director filters and delete list.
 *
 * @param priv
 *   Private structure.
 */
void
priv_fdir_delete_filters_list(struct priv *priv)
{
        priv_fdir_filter_flush(priv);
        rte_free(priv->fdir_filter_list);
        priv->fdir_filter_list = NULL;
}

/**
 * Disable flow director, remove all steering rules.
 *
 * @param priv
 *   Private structure.
 */
void
priv_fdir_disable(struct priv *priv)
{
        unsigned int i;
        struct mlx5_fdir_filter *mlx5_fdir_filter;
        struct fdir_queue *fdir_queue;

        /* Run on every flow director filter and destroy flow handle. */
        LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
                struct ibv_exp_flow *flow;

                /* Only valid elements should be in the list */
                assert(mlx5_fdir_filter != NULL);
                flow = mlx5_fdir_filter->flow;

                /* Destroy flow handle */
                if (flow != NULL) {
                        claim_zero(ibv_exp_destroy_flow(flow));
                        mlx5_fdir_filter->flow = NULL;
                }
        }

        /* Run on every RX queue to destroy related flow director QP and
         * indirection table. */
        for (i = 0; (i != priv->rxqs_n); i++) {
                struct rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);

                fdir_queue = &rxq_ctrl->fdir_queue;
                if (fdir_queue->qp != NULL) {
                        claim_zero(ibv_destroy_qp(fdir_queue->qp));
                        fdir_queue->qp = NULL;
                }

                if (fdir_queue->ind_table != NULL) {
                        claim_zero(ibv_exp_destroy_rwq_ind_table
                                   (fdir_queue->ind_table));
                        fdir_queue->ind_table = NULL;
                }
        }
}

/**
 * Enable flow director, create steering rules.
 *
 * @param priv
 *   Private structure.
 */
void
priv_fdir_enable(struct priv *priv)
{
        struct mlx5_fdir_filter *mlx5_fdir_filter;

        /* Run on every fdir filter and create flow handle */
        LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
                /* Only valid elements should be in the list */
                assert(mlx5_fdir_filter != NULL);

                priv_fdir_filter_enable(priv, mlx5_fdir_filter);
        }
}

/**
 * Find specific filter in list.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Flow director filter to find.
 *
 * @return
 *   Filter element if found, otherwise NULL.
 */
static struct mlx5_fdir_filter *
priv_find_filter_in_list(struct priv *priv,
                         const struct rte_eth_fdir_filter *fdir_filter)
{
        struct fdir_flow_desc desc;
        struct mlx5_fdir_filter *mlx5_fdir_filter;
        enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;

        /* Get flow director filter to look for. */
        fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);

        /* Look for the requested element. */
        LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
                /* Only valid elements should be in the list. */
                assert(mlx5_fdir_filter != NULL);

                /* Return matching filter. */
                if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
                        return mlx5_fdir_filter;
        }

        /* Filter not found */
        return NULL;
}

/**
 * Add new flow director filter and store it in list.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_add(struct priv *priv,
                     const struct rte_eth_fdir_filter *fdir_filter)
{
        struct mlx5_fdir_filter *mlx5_fdir_filter;
        enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
        int err = 0;

        /* Validate queue number. */
        if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
                ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
                return EINVAL;
        }

        /* Duplicate filters are currently unsupported. */
        mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
        if (mlx5_fdir_filter != NULL) {
                ERROR("filter already exists");
                return EINVAL;
        }

        /* Create new flow director filter. */
        mlx5_fdir_filter =
                rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
        if (mlx5_fdir_filter == NULL) {
                err = ENOMEM;
                ERROR("cannot allocate flow director filter: %s",
                      strerror(err));
                return err;
        }

        /* Set queue. */
        mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;

        /* Convert to mlx5 filter descriptor. */
        fdir_filter_to_flow_desc(fdir_filter,
                                 &mlx5_fdir_filter->desc, fdir_mode);

        /* Insert new filter into list. */
        LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);

        DEBUG("%p: flow director filter %p added",
              (void *)priv, (void *)mlx5_fdir_filter);

        /* Enable filter immediately if device is started. */
        if (priv->started)
                err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);

        return err;
}

/**
 * Update queue for specific filter.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_update(struct priv *priv,
                        const struct rte_eth_fdir_filter *fdir_filter)
{
        struct mlx5_fdir_filter *mlx5_fdir_filter;

        /* Validate queue number. */
        if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
                ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
                return EINVAL;
        }

        mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
        if (mlx5_fdir_filter != NULL) {
                struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
                int err = 0;

                /* Update queue number. */
                mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;

                /* Destroy flow handle. */
                if (flow != NULL) {
                        claim_zero(ibv_exp_destroy_flow(flow));
                        mlx5_fdir_filter->flow = NULL;
                }
                DEBUG("%p: flow director filter %p updated",
                      (void *)priv, (void *)mlx5_fdir_filter);

                /* Enable filter if device is started. */
                if (priv->started)
                        err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);

                return err;
        }

        /* Filter not found, create it. */
        DEBUG("%p: filter not found for update, creating new filter",
              (void *)priv);
        return priv_fdir_filter_add(priv, fdir_filter);
}

/**
 * Delete specific filter.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_delete(struct priv *priv,
                        const struct rte_eth_fdir_filter *fdir_filter)
{
        struct mlx5_fdir_filter *mlx5_fdir_filter;

        mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
        if (mlx5_fdir_filter != NULL) {
                struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;

                /* Remove element from list. */
                LIST_REMOVE(mlx5_fdir_filter, next);

                /* Destroy flow handle. */
                if (flow != NULL) {
                        claim_zero(ibv_exp_destroy_flow(flow));
                        mlx5_fdir_filter->flow = NULL;
                }

                DEBUG("%p: flow director filter %p deleted",
                      (void *)priv, (void *)mlx5_fdir_filter);

                /* Delete filter. */
                rte_free(mlx5_fdir_filter);

                return 0;
        }

        ERROR("%p: flow director delete failed, cannot find filter",
              (void *)priv);
        return EINVAL;
}

/**
 * Get flow director information.
 *
 * @param priv
 *   Private structure.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
{
        struct rte_eth_fdir_masks *mask =
                &priv->dev->data->dev_conf.fdir_conf.mask;

        fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
        fdir_info->guarant_spc = 0;

        rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));

        fdir_info->max_flexpayload = 0;
        fdir_info->flow_types_mask[0] = 0;

        fdir_info->flex_payload_unit = 0;
        fdir_info->max_flex_payload_segment_num = 0;
        fdir_info->flex_payload_limit = 0;
        memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}

/**
 * Deal with flow director operations.
 *
 * @param priv
 *   Pointer to private structure.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
{
        enum rte_fdir_mode fdir_mode =
                priv->dev->data->dev_conf.fdir_conf.mode;
        int ret = 0;

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
            fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                ERROR("%p: flow director mode %d not supported",
                      (void *)priv, fdir_mode);
                return EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = priv_fdir_filter_add(priv, arg);
                break;
        case RTE_ETH_FILTER_UPDATE:
                ret = priv_fdir_filter_update(priv, arg);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = priv_fdir_filter_delete(priv, arg);
                break;
        case RTE_ETH_FILTER_FLUSH:
                priv_fdir_filter_flush(priv);
                break;
        case RTE_ETH_FILTER_INFO:
                priv_fdir_info_get(priv, arg);
                break;
        default:
                DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
                ret = EINVAL;
                break;
        }
        return ret;
}

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        int ret = -EINVAL;
        struct priv *priv = dev->data->dev_private;

        switch (filter_type) {
        case RTE_ETH_FILTER_FDIR:
                priv_lock(priv);
                /* priv_fdir_ctrl_func() returns a positive errno value;
                 * negate it to honor this function's contract. */
                ret = -priv_fdir_ctrl_func(priv, filter_op, arg);
                priv_unlock(priv);
                break;
        default:
                ERROR("%p: filter type (%d) not supported",
                      (void *)dev, filter_type);
                break;
        }

        return ret;
}
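
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * a perfect-match rule steering non-fragmented TCPv4 traffic to RX queue 3
 * reaches mlx5_dev_filter_ctrl() above through the ethdev filter API.
 *
 *      struct rte_eth_fdir_filter filter = {
 *              .input = {
 *                      .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
 *                      .flow.tcp4_flow = {
 *                              .ip = {
 *                                      .src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
 *                                      .dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2)),
 *                              },
 *                              .src_port = rte_cpu_to_be_16(1234),
 *                              .dst_port = rte_cpu_to_be_16(5678),
 *                      },
 *              },
 *              .action.rx_queue = 3,
 *      };
 *
 *      ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                    RTE_ETH_FILTER_ADD, &filter);
 */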