1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2017 6WIND S.A.
5  *   Copyright 2017 Mellanox
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 /**
35  * @file
36  * Flow API operations for mlx4 driver.
37  */
38
39 #include <arpa/inet.h>
40 #include <assert.h>
41 #include <errno.h>
42 #include <stdalign.h>
43 #include <stddef.h>
44 #include <stdint.h>
45 #include <string.h>
46 #include <sys/queue.h>
47
48 /* Verbs headers do not support -pedantic. */
49 #ifdef PEDANTIC
50 #pragma GCC diagnostic ignored "-Wpedantic"
51 #endif
52 #include <infiniband/verbs.h>
53 #ifdef PEDANTIC
54 #pragma GCC diagnostic error "-Wpedantic"
55 #endif
56
57 #include <rte_byteorder.h>
58 #include <rte_errno.h>
59 #include <rte_eth_ctrl.h>
60 #include <rte_ethdev.h>
61 #include <rte_ether.h>
62 #include <rte_flow.h>
63 #include <rte_flow_driver.h>
64 #include <rte_malloc.h>
65
66 /* PMD headers. */
67 #include "mlx4.h"
68 #include "mlx4_flow.h"
69 #include "mlx4_rxtx.h"
70 #include "mlx4_utils.h"
71
72 /** Static initializer for a list of subsequent item types. */
73 #define NEXT_ITEM(...) \
74         (const enum rte_flow_item_type []){ \
75                 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
76         }
77
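/*
 * For instance, NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to the compound literal:
 *
 *  (const enum rte_flow_item_type []){
 *          RTE_FLOW_ITEM_TYPE_VLAN,
 *          RTE_FLOW_ITEM_TYPE_IPV4,
 *          RTE_FLOW_ITEM_TYPE_END,
 *  }
 */
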
78 /** Processor structure associated with a flow item. */
79 struct mlx4_flow_proc_item {
80         /** Bit-mask for fields supported by this PMD. */
81         const void *mask_support;
82         /** Bit-mask to use when @p item->mask is not provided. */
83         const void *mask_default;
84         /** Size in bytes for @p mask_support and @p mask_default. */
85         const unsigned int mask_sz;
86         /** Merge a pattern item into a flow rule handle. */
87         int (*merge)(struct rte_flow *flow,
88                      const struct rte_flow_item *item,
89                      const struct mlx4_flow_proc_item *proc,
90                      struct rte_flow_error *error);
91         /** Size in bytes of the destination structure. */
92         const unsigned int dst_sz;
93         /** List of possible subsequent items. */
94         const enum rte_flow_item_type *const next_item;
95 };
96
97 /** Shared resources for drop flow rules. */
98 struct mlx4_drop {
99         struct ibv_qp *qp; /**< QP target. */
100         struct ibv_cq *cq; /**< CQ associated with above QP. */
101         struct priv *priv; /**< Back pointer to private data. */
102         uint32_t refcnt; /**< Reference count. */
103 };
104
105 /**
106  * Merge Ethernet pattern item into flow rule handle.
107  *
108  * Additional mlx4-specific constraints on supported fields:
109  *
110  * - No support for partial masks.
111  * - Not providing @p item->spec or providing an empty @p mask->dst is
112  *   *only* supported if the rule doesn't specify additional matching
113  *   criteria (i.e. rule is promiscuous-like).
114  *
115  * @param[in, out] flow
116  *   Flow rule handle to update.
117  * @param[in] item
118  *   Pattern item to merge.
119  * @param[in] proc
120  *   Associated item-processing object.
121  * @param[out] error
122  *   Perform verbose error reporting if not NULL.
123  *
124  * @return
125  *   0 on success, a negative errno value otherwise and rte_errno is set.
126  */
127 static int
128 mlx4_flow_merge_eth(struct rte_flow *flow,
129                     const struct rte_flow_item *item,
130                     const struct mlx4_flow_proc_item *proc,
131                     struct rte_flow_error *error)
132 {
133         const struct rte_flow_item_eth *spec = item->spec;
134         const struct rte_flow_item_eth *mask =
135                 spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
136         struct ibv_flow_spec_eth *eth;
137         const char *msg;
138         unsigned int i;
139
140         if (!mask) {
141                 flow->promisc = 1;
142         } else {
143                 uint32_t sum_dst = 0;
144                 uint32_t sum_src = 0;
145
146                 for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
147                         sum_dst += mask->dst.addr_bytes[i];
148                         sum_src += mask->src.addr_bytes[i];
149                 }
150                 if (sum_src) {
151                         msg = "mlx4 does not support source MAC matching";
152                         goto error;
153                 } else if (!sum_dst) {
154                         flow->promisc = 1;
155                 } else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
156                         msg = "mlx4 does not support matching partial"
157                                 " Ethernet fields";
158                         goto error;
159                 }
160         }
161         if (!flow->ibv_attr)
162                 return 0;
163         if (flow->promisc) {
164                 flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
165                 return 0;
166         }
167         ++flow->ibv_attr->num_of_specs;
168         eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
169         *eth = (struct ibv_flow_spec_eth) {
170                 .type = IBV_FLOW_SPEC_ETH,
171                 .size = sizeof(*eth),
172         };
173         memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
174         memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
175         /* Remove unwanted bits from values. */
176         for (i = 0; i < ETHER_ADDR_LEN; ++i) {
177                 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
178         }
179         return 0;
180 error:
181         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
182                                   item, msg);
183 }
184
185 /**
186  * Merge VLAN pattern item into flow rule handle.
187  *
188  * Additional mlx4-specific constraints on supported fields:
189  *
190  * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
191  *   empty @p item->mask would also include non-VLAN traffic. Doing so is
192  *   therefore unsupported.
193  * - No support for partial masks.
194  *
195  * @param[in, out] flow
196  *   Flow rule handle to update.
197  * @param[in] item
198  *   Pattern item to merge.
199  * @param[in] proc
200  *   Associated item-processing object.
201  * @param[out] error
202  *   Perform verbose error reporting if not NULL.
203  *
204  * @return
205  *   0 on success, a negative errno value otherwise and rte_errno is set.
206  */
207 static int
208 mlx4_flow_merge_vlan(struct rte_flow *flow,
209                      const struct rte_flow_item *item,
210                      const struct mlx4_flow_proc_item *proc,
211                      struct rte_flow_error *error)
212 {
213         const struct rte_flow_item_vlan *spec = item->spec;
214         const struct rte_flow_item_vlan *mask =
215                 spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
216         struct ibv_flow_spec_eth *eth;
217         const char *msg;
218
219         if (!mask || !mask->tci) {
220                 msg = "mlx4 cannot match all VLAN traffic while excluding"
221                         " non-VLAN traffic, TCI VID must be specified";
222                 goto error;
223         }
224         if (mask->tci != RTE_BE16(0x0fff)) {
225                 msg = "mlx4 does not support partial TCI VID matching";
226                 goto error;
227         }
228         if (!flow->ibv_attr)
229                 return 0;
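        /*
         * VLAN has no Verbs specification of its own (dst_sz is 0 in the
         * item graph); matching is folded into the Ethernet specification
         * appended by the preceding ETH item, found one spec size before
         * the current write offset.
         */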
230         eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
231                        sizeof(*eth));
232         eth->val.vlan_tag = spec->tci;
233         eth->mask.vlan_tag = mask->tci;
234         eth->val.vlan_tag &= eth->mask.vlan_tag;
235         return 0;
236 error:
237         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
238                                   item, msg);
239 }
240
241 /**
242  * Merge IPv4 pattern item into flow rule handle.
243  *
244  * Additional mlx4-specific constraints on supported fields:
245  *
246  * - No support for partial masks.
247  *
248  * @param[in, out] flow
249  *   Flow rule handle to update.
250  * @param[in] item
251  *   Pattern item to merge.
252  * @param[in] proc
253  *   Associated item-processing object.
254  * @param[out] error
255  *   Perform verbose error reporting if not NULL.
256  *
257  * @return
258  *   0 on success, a negative errno value otherwise and rte_errno is set.
259  */
260 static int
261 mlx4_flow_merge_ipv4(struct rte_flow *flow,
262                      const struct rte_flow_item *item,
263                      const struct mlx4_flow_proc_item *proc,
264                      struct rte_flow_error *error)
265 {
266         const struct rte_flow_item_ipv4 *spec = item->spec;
267         const struct rte_flow_item_ipv4 *mask =
268                 spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
269         struct ibv_flow_spec_ipv4 *ipv4;
270         const char *msg;
271
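        /*
         * (uint32_t)(mask + 1) > UINT32_C(1) holds for any mask other than
         * 0 or 0xffffffff: adding 1 to an empty or full mask wraps to 0 or
         * yields 1, while a partial mask gives a larger value.
         */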
272         if (mask &&
273             ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
274              (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
275                 msg = "mlx4 does not support matching partial IPv4 fields";
276                 goto error;
277         }
278         if (!flow->ibv_attr)
279                 return 0;
280         ++flow->ibv_attr->num_of_specs;
281         ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
282         *ipv4 = (struct ibv_flow_spec_ipv4) {
283                 .type = IBV_FLOW_SPEC_IPV4,
284                 .size = sizeof(*ipv4),
285         };
286         if (!spec)
287                 return 0;
288         ipv4->val = (struct ibv_flow_ipv4_filter) {
289                 .src_ip = spec->hdr.src_addr,
290                 .dst_ip = spec->hdr.dst_addr,
291         };
292         ipv4->mask = (struct ibv_flow_ipv4_filter) {
293                 .src_ip = mask->hdr.src_addr,
294                 .dst_ip = mask->hdr.dst_addr,
295         };
296         /* Remove unwanted bits from values. */
297         ipv4->val.src_ip &= ipv4->mask.src_ip;
298         ipv4->val.dst_ip &= ipv4->mask.dst_ip;
299         return 0;
300 error:
301         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
302                                   item, msg);
303 }
304
305 /**
306  * Merge UDP pattern item into flow rule handle.
307  *
308  * Additional mlx4-specific constraints on supported fields:
309  *
310  * - No support for partial masks.
311  *
312  * @param[in, out] flow
313  *   Flow rule handle to update.
314  * @param[in] item
315  *   Pattern item to merge.
316  * @param[in] proc
317  *   Associated item-processing object.
318  * @param[out] error
319  *   Perform verbose error reporting if not NULL.
320  *
321  * @return
322  *   0 on success, a negative errno value otherwise and rte_errno is set.
323  */
324 static int
325 mlx4_flow_merge_udp(struct rte_flow *flow,
326                     const struct rte_flow_item *item,
327                     const struct mlx4_flow_proc_item *proc,
328                     struct rte_flow_error *error)
329 {
330         const struct rte_flow_item_udp *spec = item->spec;
331         const struct rte_flow_item_udp *mask =
332                 spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
333         struct ibv_flow_spec_tcp_udp *udp;
334         const char *msg;
335
336         if (!mask ||
337             ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
338              (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
339                 msg = "mlx4 does not support matching partial UDP fields";
340                 goto error;
341         }
342         if (!flow->ibv_attr)
343                 return 0;
344         ++flow->ibv_attr->num_of_specs;
345         udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
346         *udp = (struct ibv_flow_spec_tcp_udp) {
347                 .type = IBV_FLOW_SPEC_UDP,
348                 .size = sizeof(*udp),
349         };
350         if (!spec)
351                 return 0;
352         udp->val.dst_port = spec->hdr.dst_port;
353         udp->val.src_port = spec->hdr.src_port;
354         udp->mask.dst_port = mask->hdr.dst_port;
355         udp->mask.src_port = mask->hdr.src_port;
356         /* Remove unwanted bits from values. */
357         udp->val.src_port &= udp->mask.src_port;
358         udp->val.dst_port &= udp->mask.dst_port;
359         return 0;
360 error:
361         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
362                                   item, msg);
363 }
364
365 /**
366  * Merge TCP pattern item into flow rule handle.
367  *
368  * Additional mlx4-specific constraints on supported fields:
369  *
370  * - No support for partial masks.
371  *
372  * @param[in, out] flow
373  *   Flow rule handle to update.
374  * @param[in] item
375  *   Pattern item to merge.
376  * @param[in] proc
377  *   Associated item-processing object.
378  * @param[out] error
379  *   Perform verbose error reporting if not NULL.
380  *
381  * @return
382  *   0 on success, a negative errno value otherwise and rte_errno is set.
383  */
384 static int
385 mlx4_flow_merge_tcp(struct rte_flow *flow,
386                     const struct rte_flow_item *item,
387                     const struct mlx4_flow_proc_item *proc,
388                     struct rte_flow_error *error)
389 {
390         const struct rte_flow_item_tcp *spec = item->spec;
391         const struct rte_flow_item_tcp *mask =
392                 spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
393         struct ibv_flow_spec_tcp_udp *tcp;
394         const char *msg;
395
396         if (!mask ||
397             ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
398              (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
399                 msg = "mlx4 does not support matching partial TCP fields";
400                 goto error;
401         }
402         if (!flow->ibv_attr)
403                 return 0;
404         ++flow->ibv_attr->num_of_specs;
405         tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
406         *tcp = (struct ibv_flow_spec_tcp_udp) {
407                 .type = IBV_FLOW_SPEC_TCP,
408                 .size = sizeof(*tcp),
409         };
410         if (!spec)
411                 return 0;
412         tcp->val.dst_port = spec->hdr.dst_port;
413         tcp->val.src_port = spec->hdr.src_port;
414         tcp->mask.dst_port = mask->hdr.dst_port;
415         tcp->mask.src_port = mask->hdr.src_port;
416         /* Remove unwanted bits from values. */
417         tcp->val.src_port &= tcp->mask.src_port;
418         tcp->val.dst_port &= tcp->mask.dst_port;
419         return 0;
420 error:
421         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
422                                   item, msg);
423 }
424
425 /**
426  * Perform basic sanity checks on a pattern item.
427  *
428  * @param[in] item
429  *   Item specification.
430  * @param[in] proc
431  *   Associated item-processing object.
432  * @param[out] error
433  *   Perform verbose error reporting if not NULL.
434  *
435  * @return
436  *   0 on success, a negative errno value otherwise and rte_errno is set.
437  */
438 static int
439 mlx4_flow_item_check(const struct rte_flow_item *item,
440                      const struct mlx4_flow_proc_item *proc,
441                      struct rte_flow_error *error)
442 {
443         const uint8_t *mask;
444         unsigned int i;
445
446         /* item->last and item->mask cannot exist without item->spec. */
447         if (!item->spec && (item->mask || item->last))
448                 return rte_flow_error_set
449                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
450                          "\"mask\" or \"last\" field provided without a"
451                          " corresponding \"spec\"");
452         /* No spec, no mask, no problem. */
453         if (!item->spec)
454                 return 0;
455         mask = item->mask ?
456                 (const uint8_t *)item->mask :
457                 (const uint8_t *)proc->mask_default;
458         assert(mask);
459         /*
460          * Single-pass check to make sure that:
461          * - Mask is supported, no bits are set outside proc->mask_support.
462          * - Both item->spec and item->last are included in mask.
463          */
464         for (i = 0; i != proc->mask_sz; ++i) {
465                 if (!mask[i])
466                         continue;
467                 if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
468                     ((const uint8_t *)proc->mask_support)[i])
469                         return rte_flow_error_set
470                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
471                                  item, "unsupported field found in \"mask\"");
472                 if (item->last &&
473                     (((const uint8_t *)item->spec)[i] & mask[i]) !=
474                     (((const uint8_t *)item->last)[i] & mask[i]))
475                         return rte_flow_error_set
476                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
477                                  item,
478                                  "range between \"spec\" and \"last\""
479                                  " is larger than \"mask\"");
480         }
481         return 0;
482 }
483
484 /** Graph of supported pattern items and their processing handlers. */
485 static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
486         [RTE_FLOW_ITEM_TYPE_END] = {
487                 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
488         },
489         [RTE_FLOW_ITEM_TYPE_ETH] = {
490                 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
491                                        RTE_FLOW_ITEM_TYPE_IPV4),
492                 .mask_support = &(const struct rte_flow_item_eth){
493                         /* Only destination MAC can be matched. */
494                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
495                 },
496                 .mask_default = &rte_flow_item_eth_mask,
497                 .mask_sz = sizeof(struct rte_flow_item_eth),
498                 .merge = mlx4_flow_merge_eth,
499                 .dst_sz = sizeof(struct ibv_flow_spec_eth),
500         },
501         [RTE_FLOW_ITEM_TYPE_VLAN] = {
502                 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
503                 .mask_support = &(const struct rte_flow_item_vlan){
504                         /* Only TCI VID matching is supported. */
505                         .tci = RTE_BE16(0x0fff),
506                 },
507                 .mask_default = &rte_flow_item_vlan_mask,
508                 .mask_sz = sizeof(struct rte_flow_item_vlan),
509                 .merge = mlx4_flow_merge_vlan,
510                 .dst_sz = 0,
511         },
512         [RTE_FLOW_ITEM_TYPE_IPV4] = {
513                 .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
514                                        RTE_FLOW_ITEM_TYPE_TCP),
515                 .mask_support = &(const struct rte_flow_item_ipv4){
516                         .hdr = {
517                                 .src_addr = RTE_BE32(0xffffffff),
518                                 .dst_addr = RTE_BE32(0xffffffff),
519                         },
520                 },
521                 .mask_default = &rte_flow_item_ipv4_mask,
522                 .mask_sz = sizeof(struct rte_flow_item_ipv4),
523                 .merge = mlx4_flow_merge_ipv4,
524                 .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
525         },
526         [RTE_FLOW_ITEM_TYPE_UDP] = {
527                 .mask_support = &(const struct rte_flow_item_udp){
528                         .hdr = {
529                                 .src_port = RTE_BE16(0xffff),
530                                 .dst_port = RTE_BE16(0xffff),
531                         },
532                 },
533                 .mask_default = &rte_flow_item_udp_mask,
534                 .mask_sz = sizeof(struct rte_flow_item_udp),
535                 .merge = mlx4_flow_merge_udp,
536                 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
537         },
538         [RTE_FLOW_ITEM_TYPE_TCP] = {
539                 .mask_support = &(const struct rte_flow_item_tcp){
540                         .hdr = {
541                                 .src_port = RTE_BE16(0xffff),
542                                 .dst_port = RTE_BE16(0xffff),
543                         },
544                 },
545                 .mask_default = &rte_flow_item_tcp_mask,
546                 .mask_sz = sizeof(struct rte_flow_item_tcp),
547                 .merge = mlx4_flow_merge_tcp,
548                 .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
549         },
550 };
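
/*
 * According to this graph, a pattern must begin with ETH, optionally
 * followed by VLAN, then IPV4, then UDP or TCP; e.g. ETH -> IPV4 -> UDP ->
 * END is accepted, whereas a pattern starting with IPV4 is rejected because
 * only ETH may follow the initial END entry.
 */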
551
552 /**
553  * Make sure a flow rule is supported and initialize associated structure.
554  *
555  * @param priv
556  *   Pointer to private structure.
557  * @param[in] attr
558  *   Flow rule attributes.
559  * @param[in] pattern
560  *   Pattern specification (list terminated by the END pattern item).
561  * @param[in] actions
562  *   Associated actions (list terminated by the END action).
563  * @param[out] error
564  *   Perform verbose error reporting if not NULL.
565  * @param[in, out] addr
566  *   Buffer where the resulting flow rule handle pointer must be stored.
567  *   If NULL, stop processing after validation stage.
568  *
569  * @return
570  *   0 on success, a negative errno value otherwise and rte_errno is set.
571  */
572 static int
573 mlx4_flow_prepare(struct priv *priv,
574                   const struct rte_flow_attr *attr,
575                   const struct rte_flow_item pattern[],
576                   const struct rte_flow_action actions[],
577                   struct rte_flow_error *error,
578                   struct rte_flow **addr)
579 {
580         const struct rte_flow_item *item;
581         const struct rte_flow_action *action;
582         const struct mlx4_flow_proc_item *proc;
583         struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
584         struct rte_flow *flow = &temp;
585         const char *msg = NULL;
586
587         if (attr->group)
588                 return rte_flow_error_set
589                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
590                          NULL, "groups are not supported");
591         if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
592                 return rte_flow_error_set
593                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
594                          NULL, "maximum priority level is "
595                          MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
596         if (attr->egress)
597                 return rte_flow_error_set
598                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
599                          NULL, "egress is not supported");
600         if (!attr->ingress)
601                 return rte_flow_error_set
602                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
603                          NULL, "only ingress is supported");
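        /*
         * The pattern below is processed in up to two passes: the first uses
         * the temporary handle on the stack only to validate items and
         * accumulate the required Verbs attribute size, the second (reached
         * through "goto fill" once a proper handle has been allocated) fills
         * in the actual specifications.
         */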
604 fill:
605         proc = mlx4_flow_proc_item_list;
606         /* Go over pattern. */
607         for (item = pattern; item->type; ++item) {
608                 const struct mlx4_flow_proc_item *next = NULL;
609                 unsigned int i;
610                 int err;
611
612                 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
613                         continue;
614                 if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
615                         flow->internal = 1;
616                         continue;
617                 }
618                 if (flow->promisc) {
619                         msg = "mlx4 does not support additional matching"
620                                 " criteria combined with indiscriminate"
621                                 " matching on Ethernet headers";
622                         goto exit_item_not_supported;
623                 }
624                 for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
625                         if (proc->next_item[i] == item->type) {
626                                 next = &mlx4_flow_proc_item_list[item->type];
627                                 break;
628                         }
629                 }
630                 if (!next)
631                         goto exit_item_not_supported;
632                 proc = next;
633                 /*
634                  * Perform basic sanity checks only once, while handle is
635                  * not allocated.
636                  */
637                 if (flow == &temp) {
638                         err = mlx4_flow_item_check(item, proc, error);
639                         if (err)
640                                 return err;
641                 }
642                 if (proc->merge) {
643                         err = proc->merge(flow, item, proc, error);
644                         if (err)
645                                 return err;
646                 }
647                 flow->ibv_attr_size += proc->dst_sz;
648         }
649         /* Go over actions list. */
650         for (action = actions; action->type; ++action) {
651                 switch (action->type) {
652                         const struct rte_flow_action_queue *queue;
653
654                 case RTE_FLOW_ACTION_TYPE_VOID:
655                         continue;
656                 case RTE_FLOW_ACTION_TYPE_DROP:
657                         flow->drop = 1;
658                         break;
659                 case RTE_FLOW_ACTION_TYPE_QUEUE:
660                         queue = action->conf;
661                         if (queue->index >= priv->dev->data->nb_rx_queues)
662                                 goto exit_action_not_supported;
663                         flow->queue = 1;
664                         flow->queue_id = queue->index;
665                         break;
666                 default:
667                         goto exit_action_not_supported;
668                 }
669         }
670         if (!flow->queue && !flow->drop)
671                 return rte_flow_error_set
672                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
673                          NULL, "no valid action");
674         /* Validation ends here. */
675         if (!addr)
676                 return 0;
677         if (flow == &temp) {
678                 /* Allocate proper handle based on collected data. */
679                 const struct mlx4_malloc_vec vec[] = {
680                         {
681                                 .align = alignof(struct rte_flow),
682                                 .size = sizeof(*flow),
683                                 .addr = (void **)&flow,
684                         },
685                         {
686                                 .align = alignof(struct ibv_flow_attr),
687                                 .size = temp.ibv_attr_size,
688                                 .addr = (void **)&temp.ibv_attr,
689                         },
690                 };
691
692                 if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
693                         return rte_flow_error_set
694                                 (error, -rte_errno,
695                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
696                                  "flow rule handle allocation failure");
697                 /* Most fields will be updated by second pass. */
698                 *flow = (struct rte_flow){
699                         .ibv_attr = temp.ibv_attr,
700                         .ibv_attr_size = sizeof(*flow->ibv_attr),
701                 };
702                 *flow->ibv_attr = (struct ibv_flow_attr){
703                         .type = IBV_FLOW_ATTR_NORMAL,
704                         .size = sizeof(*flow->ibv_attr),
705                         .priority = attr->priority,
706                         .port = priv->port,
707                 };
708                 goto fill;
709         }
710         *addr = flow;
711         return 0;
712 exit_item_not_supported:
713         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
714                                   item, msg ? msg : "item not supported");
715 exit_action_not_supported:
716         return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
717                                   action, "action not supported");
718 }
719
720 /**
721  * Validate a flow supported by the NIC.
722  *
723  * @see rte_flow_validate()
724  * @see rte_flow_ops
725  */
726 static int
727 mlx4_flow_validate(struct rte_eth_dev *dev,
728                    const struct rte_flow_attr *attr,
729                    const struct rte_flow_item pattern[],
730                    const struct rte_flow_action actions[],
731                    struct rte_flow_error *error)
732 {
733         struct priv *priv = dev->data->dev_private;
734
735         return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
736 }
737
738 /**
739  * Get a drop flow rule resources instance.
740  *
741  * @param priv
742  *   Pointer to private structure.
743  *
744  * @return
745  *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
746  *   is set.
747  */
748 static struct mlx4_drop *
749 mlx4_drop_get(struct priv *priv)
750 {
751         struct mlx4_drop *drop = priv->drop;
752
753         if (drop) {
754                 assert(drop->refcnt);
755                 assert(drop->priv == priv);
756                 ++drop->refcnt;
757                 return drop;
758         }
759         drop = rte_malloc(__func__, sizeof(*drop), 0);
760         if (!drop)
761                 goto error;
762         *drop = (struct mlx4_drop){
763                 .priv = priv,
764                 .refcnt = 1,
765         };
766         drop->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
767         if (!drop->cq)
768                 goto error;
769         drop->qp = ibv_create_qp(priv->pd,
770                                  &(struct ibv_qp_init_attr){
771                                         .send_cq = drop->cq,
772                                         .recv_cq = drop->cq,
773                                         .qp_type = IBV_QPT_RAW_PACKET,
774                                  });
775         if (!drop->qp)
776                 goto error;
777         priv->drop = drop;
778         return drop;
779 error:
780         if (drop && drop->qp)
781                 claim_zero(ibv_destroy_qp(drop->qp));
782         if (drop && drop->cq)
783                 claim_zero(ibv_destroy_cq(drop->cq));
784         if (drop)
785                 rte_free(drop);
786         rte_errno = ENOMEM;
787         return NULL;
788 }
789
790 /**
791  * Give back a drop flow rule resources instance.
792  *
793  * @param drop
794  *   Pointer to drop flow rule resources.
795  */
796 static void
797 mlx4_drop_put(struct mlx4_drop *drop)
798 {
799         assert(drop->refcnt);
800         if (--drop->refcnt)
801                 return;
802         drop->priv->drop = NULL;
803         claim_zero(ibv_destroy_qp(drop->qp));
804         claim_zero(ibv_destroy_cq(drop->cq));
805         rte_free(drop);
806 }
807
808 /**
809  * Toggle a configured flow rule.
810  *
811  * @param priv
812  *   Pointer to private structure.
813  * @param flow
814  *   Flow rule handle to toggle.
815  * @param enable
816  *   Whether associated Verbs flow must be created or removed.
817  * @param[out] error
818  *   Perform verbose error reporting if not NULL.
819  *
820  * @return
821  *   0 on success, a negative errno value otherwise and rte_errno is set.
822  */
823 static int
824 mlx4_flow_toggle(struct priv *priv,
825                  struct rte_flow *flow,
826                  int enable,
827                  struct rte_flow_error *error)
828 {
829         struct ibv_qp *qp = NULL;
830         const char *msg;
831         int err;
832
833         if (!enable) {
834                 if (!flow->ibv_flow)
835                         return 0;
836                 claim_zero(ibv_destroy_flow(flow->ibv_flow));
837                 flow->ibv_flow = NULL;
838                 if (flow->drop)
839                         mlx4_drop_put(priv->drop);
840                 return 0;
841         }
842         assert(flow->ibv_attr);
843         if (!flow->internal &&
844             !priv->isolated &&
845             flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
846                 if (flow->ibv_flow) {
847                         claim_zero(ibv_destroy_flow(flow->ibv_flow));
848                         flow->ibv_flow = NULL;
849                         if (flow->drop)
850                                 mlx4_drop_put(priv->drop);
851                 }
852                 err = EACCES;
853                 msg = ("priority level "
854                        MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
855                        " is reserved when not in isolated mode");
856                 goto error;
857         }
858         if (flow->queue) {
859                 struct rxq *rxq = NULL;
860
861                 if (flow->queue_id < priv->dev->data->nb_rx_queues)
862                         rxq = priv->dev->data->rx_queues[flow->queue_id];
863                 if (flow->ibv_flow) {
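                        /*
                         * Nothing to do if the existing Verbs flow is still
                         * consistent: targeting the queue while it exists,
                         * or dropping while it does not.
                         */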
864                         if (!rxq ^ !flow->drop)
865                                 return 0;
866                         /* Verbs flow needs updating. */
867                         claim_zero(ibv_destroy_flow(flow->ibv_flow));
868                         flow->ibv_flow = NULL;
869                         if (flow->drop)
870                                 mlx4_drop_put(priv->drop);
871                 }
872                 if (rxq)
873                         qp = rxq->qp;
874                 /* A missing target queue drops traffic implicitly. */
875                 flow->drop = !rxq;
876         }
877         if (flow->drop) {
878                 mlx4_drop_get(priv);
879                 if (!priv->drop) {
880                         err = rte_errno;
881                         msg = "resources for drop flow rule cannot be created";
882                         goto error;
883                 }
884                 qp = priv->drop->qp;
885         }
886         assert(qp);
887         if (flow->ibv_flow)
888                 return 0;
889         flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
890         if (flow->ibv_flow)
891                 return 0;
892         if (flow->drop)
893                 mlx4_drop_put(priv->drop);
894         err = errno;
895         msg = "flow rule rejected by device";
896 error:
897         return rte_flow_error_set
898                 (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
899 }
900
901 /**
902  * Create a flow.
903  *
904  * @see rte_flow_create()
905  * @see rte_flow_ops
906  */
907 static struct rte_flow *
908 mlx4_flow_create(struct rte_eth_dev *dev,
909                  const struct rte_flow_attr *attr,
910                  const struct rte_flow_item pattern[],
911                  const struct rte_flow_action actions[],
912                  struct rte_flow_error *error)
913 {
914         struct priv *priv = dev->data->dev_private;
915         struct rte_flow *flow;
916         int err;
917
918         err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
919         if (err)
920                 return NULL;
921         err = mlx4_flow_toggle(priv, flow, priv->started, error);
922         if (!err) {
923                 struct rte_flow *curr = LIST_FIRST(&priv->flows);
924
925                 /* New rules are inserted after internal ones. */
926                 if (!curr || !curr->internal) {
927                         LIST_INSERT_HEAD(&priv->flows, flow, next);
928                 } else {
929                         while (LIST_NEXT(curr, next) &&
930                                LIST_NEXT(curr, next)->internal)
931                                 curr = LIST_NEXT(curr, next);
932                         LIST_INSERT_AFTER(curr, flow, next);
933                 }
934                 return flow;
935         }
936         rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
937                            error->message);
938         rte_free(flow);
939         return NULL;
940 }
941
942 /**
943  * Configure isolated mode.
944  *
945  * @see rte_flow_isolate()
946  * @see rte_flow_ops
947  */
948 static int
949 mlx4_flow_isolate(struct rte_eth_dev *dev,
950                   int enable,
951                   struct rte_flow_error *error)
952 {
953         struct priv *priv = dev->data->dev_private;
954
955         if (!!enable == !!priv->isolated)
956                 return 0;
957         priv->isolated = !!enable;
958         if (mlx4_flow_sync(priv, error)) {
959                 priv->isolated = !enable;
960                 return -rte_errno;
961         }
962         return 0;
963 }
964
965 /**
966  * Destroy a flow rule.
967  *
968  * @see rte_flow_destroy()
969  * @see rte_flow_ops
970  */
971 static int
972 mlx4_flow_destroy(struct rte_eth_dev *dev,
973                   struct rte_flow *flow,
974                   struct rte_flow_error *error)
975 {
976         struct priv *priv = dev->data->dev_private;
977         int err = mlx4_flow_toggle(priv, flow, 0, error);
978
979         if (err)
980                 return err;
981         LIST_REMOVE(flow, next);
982         rte_free(flow);
983         return 0;
984 }
985
986 /**
987  * Destroy user-configured flow rules.
988  *
989  * This function skips internal flow rules.
990  *
991  * @see rte_flow_flush()
992  * @see rte_flow_ops
993  */
994 static int
995 mlx4_flow_flush(struct rte_eth_dev *dev,
996                 struct rte_flow_error *error)
997 {
998         struct priv *priv = dev->data->dev_private;
999         struct rte_flow *flow = LIST_FIRST(&priv->flows);
1000
1001         while (flow) {
1002                 struct rte_flow *next = LIST_NEXT(flow, next);
1003
1004                 if (!flow->internal)
1005                         mlx4_flow_destroy(dev, flow, error);
1006                 flow = next;
1007         }
1008         return 0;
1009 }
1010
1011 /**
1012  * Helper function to determine the next configured VLAN filter.
1013  *
1014  * @param priv
1015  *   Pointer to private structure.
1016  * @param vlan
1017  *   VLAN ID to use as a starting point.
1018  *
1019  * @return
1020  *   Next configured VLAN ID or a high value (>= 4096) if there is none.
1021  */
1022 static uint16_t
1023 mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
1024 {
1025         while (vlan < 4096) {
1026                 if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
1027                     (UINT64_C(1) << (vlan % 64)))
1028                         return vlan;
1029                 ++vlan;
1030         }
1031         return vlan;
1032 }
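
/*
 * The filter bitmap packs 64 VLAN IDs per word; e.g. VLAN 100 corresponds
 * to bit 36 (100 % 64) of ids[1] (100 / 64).
 */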
1033
1034 /**
1035  * Generate internal flow rules.
1036  *
1037  * - MAC flow rules are generated from @p dev->data->mac_addrs
1038  *   (@p priv->mac array).
1039  * - An additional flow rule for Ethernet broadcasts is also generated.
1040  * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
1041  *   is enabled and VLAN filters are configured.
1042  *
1043  * @param priv
1044  *   Pointer to private structure.
1045  * @param[out] error
1046  *   Perform verbose error reporting if not NULL.
1047  *
1048  * @return
1049  *   0 on success, a negative errno value otherwise and rte_errno is set.
1050  */
1051 static int
1052 mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
1053 {
1054         struct rte_flow_attr attr = {
1055                 .priority = MLX4_FLOW_PRIORITY_LAST,
1056                 .ingress = 1,
1057         };
1058         struct rte_flow_item_eth eth_spec;
1059         const struct rte_flow_item_eth eth_mask = {
1060                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1061         };
1062         struct rte_flow_item_vlan vlan_spec;
1063         const struct rte_flow_item_vlan vlan_mask = {
1064                 .tci = RTE_BE16(0x0fff),
1065         };
1066         struct rte_flow_item pattern[] = {
1067                 {
1068                         .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
1069                 },
1070                 {
1071                         .type = RTE_FLOW_ITEM_TYPE_ETH,
1072                         .spec = &eth_spec,
1073                         .mask = &eth_mask,
1074                 },
1075                 {
1076                         /* Replaced with VLAN if filtering is enabled. */
1077                         .type = RTE_FLOW_ITEM_TYPE_END,
1078                 },
1079                 {
1080                         .type = RTE_FLOW_ITEM_TYPE_END,
1081                 },
1082         };
1083         struct rte_flow_action actions[] = {
1084                 {
1085                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
1086                         .conf = &(struct rte_flow_action_queue){
1087                                 .index = 0,
1088                         },
1089                 },
1090                 {
1091                         .type = RTE_FLOW_ACTION_TYPE_END,
1092                 },
1093         };
1094         struct ether_addr *rule_mac = &eth_spec.dst;
1095         rte_be16_t *rule_vlan =
1096                 priv->dev->data->dev_conf.rxmode.hw_vlan_filter ?
1097                 &vlan_spec.tci :
1098                 NULL;
1099         uint16_t vlan = 0;
1100         struct rte_flow *flow;
1101         unsigned int i;
1102         int err = 0;
1103
1104         /*
1105          * Set up VLAN item if filtering is enabled and at least one VLAN
1106          * filter is configured.
1107          */
1108         if (rule_vlan) {
1109                 vlan = mlx4_flow_internal_next_vlan(priv, 0);
1110                 if (vlan < 4096) {
1111                         pattern[2] = (struct rte_flow_item){
1112                                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1113                                 .spec = &vlan_spec,
1114                                 .mask = &vlan_mask,
1115                         };
1116 next_vlan:
1117                         *rule_vlan = rte_cpu_to_be_16(vlan);
1118                 } else {
1119                         rule_vlan = NULL;
1120                 }
1121         }
1122         for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
1123                 const struct ether_addr *mac;
1124
1125                 /* Broadcasts are handled by an extra iteration. */
1126                 if (i < RTE_DIM(priv->mac))
1127                         mac = &priv->mac[i];
1128                 else
1129                         mac = &eth_mask.dst;
1130                 if (is_zero_ether_addr(mac))
1131                         continue;
1132                 /* Check if MAC flow rule is already present. */
1133                 for (flow = LIST_FIRST(&priv->flows);
1134                      flow && flow->internal;
1135                      flow = LIST_NEXT(flow, next)) {
1136                         const struct ibv_flow_spec_eth *eth =
1137                                 (const void *)((uintptr_t)flow->ibv_attr +
1138                                                sizeof(*flow->ibv_attr));
1139                         unsigned int j;
1140
1141                         if (!flow->mac)
1142                                 continue;
1143                         assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
1144                         assert(flow->ibv_attr->num_of_specs == 1);
1145                         assert(eth->type == IBV_FLOW_SPEC_ETH);
1146                         if (rule_vlan &&
1147                             (eth->val.vlan_tag != *rule_vlan ||
1148                              eth->mask.vlan_tag != RTE_BE16(0x0fff)))
1149                                 continue;
1150                         if (!rule_vlan && eth->mask.vlan_tag)
1151                                 continue;
1152                         for (j = 0; j != sizeof(mac->addr_bytes); ++j)
1153                                 if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
1154                                     eth->mask.dst_mac[j] != UINT8_C(0xff) ||
1155                                     eth->val.src_mac[j] != UINT8_C(0x00) ||
1156                                     eth->mask.src_mac[j] != UINT8_C(0x00))
1157                                         break;
1158                         if (j == sizeof(mac->addr_bytes))
1159                                 break;
1160                 }
1161                 if (!flow || !flow->internal) {
1162                         /* Not found, create a new flow rule. */
1163                         memcpy(rule_mac, mac, sizeof(*mac));
1164                         flow = mlx4_flow_create(priv->dev, &attr, pattern,
1165                                                 actions, error);
1166                         if (!flow) {
1167                                 err = -rte_errno;
1168                                 break;
1169                         }
1170                 }
1171                 flow->select = 1;
1172                 flow->mac = 1;
1173         }
1174         if (!err && rule_vlan) {
1175                 vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
1176                 if (vlan < 4096)
1177                         goto next_vlan;
1178         }
1179         /* Clear selection and clean up stale MAC flow rules. */
1180         flow = LIST_FIRST(&priv->flows);
1181         while (flow && flow->internal) {
1182                 struct rte_flow *next = LIST_NEXT(flow, next);
1183
1184                 if (flow->mac && !flow->select)
1185                         claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
1186                 else
1187                         flow->select = 0;
1188                 flow = next;
1189         }
1190         return err;
1191 }
1192
1193 /**
1194  * Synchronize flow rules.
1195  *
1196  * This function synchronizes flow rules with the state of the device by
1197  * taking into account isolated mode and whether target queues are
1198  * configured.
1199  *
1200  * @param priv
1201  *   Pointer to private structure.
1202  * @param[out] error
1203  *   Perform verbose error reporting if not NULL.
1204  *
1205  * @return
1206  *   0 on success, a negative errno value otherwise and rte_errno is set.
1207  */
1208 int
1209 mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
1210 {
1211         struct rte_flow *flow;
1212         int ret;
1213
1214         /* Internal flow rules are guaranteed to come first in the list. */
1215         if (priv->isolated) {
1216                 /*
1217                  * Get rid of them in isolated mode, stop at the first
1218                  * non-internal rule found.
1219                  */
1220                 for (flow = LIST_FIRST(&priv->flows);
1221                      flow && flow->internal;
1222                      flow = LIST_FIRST(&priv->flows))
1223                         claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
1224         } else {
1225                 /* Refresh internal rules. */
1226                 ret = mlx4_flow_internal(priv, error);
1227                 if (ret)
1228                         return ret;
1229         }
1230         /* Toggle the remaining flow rules. */
1231         for (flow = LIST_FIRST(&priv->flows);
1232              flow;
1233              flow = LIST_NEXT(flow, next)) {
1234                 ret = mlx4_flow_toggle(priv, flow, priv->started, error);
1235                 if (ret)
1236                         return ret;
1237         }
1238         if (!priv->started)
1239                 assert(!priv->drop);
1240         return 0;
1241 }
1242
1243 /**
1244  * Clean up all flow rules.
1245  *
1246  * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
1247  * rules regardless of whether they are internal or user-configured.
1248  *
1249  * @param priv
1250  *   Pointer to private structure.
1251  */
1252 void
1253 mlx4_flow_clean(struct priv *priv)
1254 {
1255         struct rte_flow *flow;
1256
1257         while ((flow = LIST_FIRST(&priv->flows)))
1258                 mlx4_flow_destroy(priv->dev, flow, NULL);
1259 }
1260
1261 static const struct rte_flow_ops mlx4_flow_ops = {
1262         .validate = mlx4_flow_validate,
1263         .create = mlx4_flow_create,
1264         .destroy = mlx4_flow_destroy,
1265         .flush = mlx4_flow_flush,
1266         .isolate = mlx4_flow_isolate,
1267 };
1268
1269 /**
1270  * Manage filter operations.
1271  *
1272  * @param dev
1273  *   Pointer to Ethernet device structure.
1274  * @param filter_type
1275  *   Filter type.
1276  * @param filter_op
1277  *   Operation to perform.
1278  * @param arg
1279  *   Pointer to operation-specific structure.
1280  *
1281  * @return
1282  *   0 on success, negative errno value otherwise and rte_errno is set.
1283  */
1284 int
1285 mlx4_filter_ctrl(struct rte_eth_dev *dev,
1286                  enum rte_filter_type filter_type,
1287                  enum rte_filter_op filter_op,
1288                  void *arg)
1289 {
1290         switch (filter_type) {
1291         case RTE_ETH_FILTER_GENERIC:
1292                 if (filter_op != RTE_ETH_FILTER_GET)
1293                         break;
1294                 *(const void **)arg = &mlx4_flow_ops;
1295                 return 0;
1296         default:
1297                 ERROR("%p: filter type (%d) not supported",
1298                       (void *)dev, filter_type);
1299                 break;
1300         }
1301         rte_errno = ENOTSUP;
1302         return -rte_errno;
1303 }
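
/*
 * Application-side usage sketch (illustration only, not part of this
 * driver): the operations in mlx4_flow_ops are reached through the generic
 * rte_flow API, e.g.:
 *
 *  struct rte_flow_error err;
 *  struct rte_flow *flow = NULL;
 *
 *  if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *          flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * where rte_flow_validate() lands in mlx4_flow_validate() and
 * rte_flow_create() in mlx4_flow_create() above; port_id, attr, pattern and
 * actions are application-defined.
 */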