net/mlx5: add flow IPv6 item
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <sys/queue.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_prm.h"
32 #include "mlx5_glue.h"
33
34 /* Dev ops structure defined in mlx5.c */
35 extern const struct eth_dev_ops mlx5_dev_ops;
36 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
37
38 /* Pattern Layer bits. */
39 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
40 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
41 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
42 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
43 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
44 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
45 /* Masks. */
46 #define MLX5_FLOW_LAYER_OUTER_L3 \
47         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
48 #define MLX5_FLOW_LAYER_OUTER_L4 \
49         (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
50
51 /* Actions that modify the fate of matching traffic. */
52 #define MLX5_FLOW_FATE_DROP (1u << 0)
53 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
54
55 /** Verbs flow structure: attributes, specifications and related resources. */
56 struct mlx5_flow_verbs {
57         unsigned int size; /**< Total size of the specifications. */
58         struct {
59                 struct ibv_flow_attr *attr;
60                 /**< Pointer to the Verbs flow attributes. */
61                 uint8_t *specs; /**< Pointer to the specifications buffer. */
62         };
63         struct ibv_flow *flow; /**< Verbs flow pointer. */
64         struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
65 };
66
67 /* Flow structure. */
68 struct rte_flow {
69         TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
70         struct rte_flow_attr attributes; /**< User flow attributes. */
71         uint32_t layers;
72         /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
73         uint32_t fate;
74         /**< Bit-fields of present fate, see MLX5_FLOW_FATE_*. */
75         struct mlx5_flow_verbs verbs; /**< Verbs flow. */
76         uint16_t queue; /**< Destination queue to redirect traffic to. */
77 };
78
79 static const struct rte_flow_ops mlx5_flow_ops = {
80         .validate = mlx5_flow_validate,
81         .create = mlx5_flow_create,
82         .destroy = mlx5_flow_destroy,
83         .flush = mlx5_flow_flush,
84         .isolate = mlx5_flow_isolate,
85 };
86
87 /* Convert FDIR request to Generic flow. */
88 struct mlx5_fdir {
89         struct rte_flow_attr attr;
90         struct rte_flow_action actions[2];
91         struct rte_flow_item items[4];
92         struct rte_flow_item_eth l2;
93         struct rte_flow_item_eth l2_mask;
94         union {
95                 struct rte_flow_item_ipv4 ipv4;
96                 struct rte_flow_item_ipv6 ipv6;
97         } l3;
98         union {
99                 struct rte_flow_item_ipv4 ipv4;
100                 struct rte_flow_item_ipv6 ipv6;
101         } l3_mask;
102         union {
103                 struct rte_flow_item_udp udp;
104                 struct rte_flow_item_tcp tcp;
105         } l4;
106         union {
107                 struct rte_flow_item_udp udp;
108                 struct rte_flow_item_tcp tcp;
109         } l4_mask;
110         struct rte_flow_action_queue queue;
111 };
112
113 /* Verbs specification header. */
114 struct ibv_spec_header {
115         enum ibv_flow_spec_type type;
116         uint16_t size;
117 };
118
119 /**
120  * Discover the maximum number of priorities available.
121  *
122  * @param[in] dev
123  *   Pointer to Ethernet device.
124  *
125  * @return
126  *   Number of supported flow priorities on success, a negative errno value
127  *   otherwise and rte_errno is set.
128  */
129 int
130 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
131 {
132         struct {
133                 struct ibv_flow_attr attr;
134                 struct ibv_flow_spec_eth eth;
135                 struct ibv_flow_spec_action_drop drop;
136         } flow_attr = {
137                 .attr = {
138                         .num_of_specs = 2,
139                 },
140                 .eth = {
141                         .type = IBV_FLOW_SPEC_ETH,
142                         .size = sizeof(struct ibv_flow_spec_eth),
143                 },
144                 .drop = {
145                         .size = sizeof(struct ibv_flow_spec_action_drop),
146                         .type = IBV_FLOW_SPEC_ACTION_DROP,
147                 },
148         };
149         struct ibv_flow *flow;
150         struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
151         uint16_t vprio[] = { 8, 16 };
152         int i;
153
154         if (!drop) {
155                 rte_errno = ENOTSUP;
156                 return -rte_errno;
157         }
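        /*
         * Probe the device by creating a drop flow at the highest priority of
         * each candidate range (8 or 16 priorities); the last range that can
         * be created gives the number of priorities supported.
         */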
158         for (i = 0; i != RTE_DIM(vprio); i++) {
159                 flow_attr.attr.priority = vprio[i] - 1;
160                 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
161                 if (!flow)
162                         break;
163                 claim_zero(mlx5_glue->destroy_flow(flow));
164         }
165         mlx5_hrxq_drop_release(dev);
166         DRV_LOG(INFO, "port %u flow maximum priority: %d",
167                 dev->data->port_id, vprio[i - 1]);
168         return vprio[i - 1];
169 }
170
171 /**
172  * Verify the @p attributes will be correctly understood by the NIC and store
173  * them in the @p flow if everything is correct.
174  *
175  * @param[in] dev
176  *   Pointer to Ethernet device.
177  * @param[in] attributes
178  *   Pointer to flow attributes
179  * @param[in, out] flow
180  *   Pointer to the rte_flow structure.
181  * @param[out] error
182  *   Pointer to error structure.
183  *
184  * @return
185  *   0 on success, a negative errno value otherwise and rte_errno is set.
186  */
187 static int
188 mlx5_flow_attributes(struct rte_eth_dev *dev,
189                      const struct rte_flow_attr *attributes,
190                      struct rte_flow *flow,
191                      struct rte_flow_error *error)
192 {
193         uint32_t priority_max =
194                 ((struct priv *)dev->data->dev_private)->config.flow_prio;
195
196         if (attributes->group)
197                 return rte_flow_error_set(error, ENOTSUP,
198                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
199                                           NULL,
200                                           "groups are not supported");
201         if (attributes->priority >= priority_max)
202                 return rte_flow_error_set(error, ENOTSUP,
203                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
204                                           NULL,
205                                           "priority out of range");
206         if (attributes->egress)
207                 return rte_flow_error_set(error, ENOTSUP,
208                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
209                                           NULL,
210                                           "egress is not supported");
211         if (attributes->transfer)
212                 return rte_flow_error_set(error, ENOTSUP,
213                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
214                                           NULL,
215                                           "transfer is not supported");
216         if (!attributes->ingress)
217                 return rte_flow_error_set(error, ENOTSUP,
218                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
219                                           NULL,
220                                           "ingress attribute is mandatory");
221         flow->attributes = *attributes;
222         return 0;
223 }
224
225 /**
226  * Verify the @p item specifications (spec, last, mask) are compatible with the
227  * NIC capabilities.
228  *
229  * @param[in] item
230  *   Item specification.
231  * @param[in] mask
232  *   @p item->mask or flow default bit-masks.
233  * @param[in] nic_mask
234  *   Bit-masks covering the fields supported by the NIC, to compare with the user mask.
235  * @param[in] size
236  *   Bit-mask size in bytes.
237  * @param[out] error
238  *   Pointer to error structure.
239  *
240  * @return
241  *   0 on success, a negative errno value otherwise and rte_errno is set.
242  */
243 static int
244 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
245                           const uint8_t *mask,
246                           const uint8_t *nic_mask,
247                           unsigned int size,
248                           struct rte_flow_error *error)
249 {
250         unsigned int i;
251
252         assert(nic_mask);
253         for (i = 0; i < size; ++i)
254                 if ((nic_mask[i] | mask[i]) != nic_mask[i])
255                         return rte_flow_error_set(error, ENOTSUP,
256                                                   RTE_FLOW_ERROR_TYPE_ITEM,
257                                                   item,
258                                                   "mask enables unsupported"
259                                                   " bits");
260         if (!item->spec && (item->mask || item->last))
261                 return rte_flow_error_set(error, EINVAL,
262                                           RTE_FLOW_ERROR_TYPE_ITEM,
263                                           item,
264                                           "mask/last without a spec is not"
265                                           " supported");
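        /*
         * Ranges are not supported: when both spec and last are provided,
         * their masked values must be identical.
         */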
266         if (item->spec && item->last) {
267                 uint8_t spec[size];
268                 uint8_t last[size];
269                 unsigned int i;
270                 int ret;
271
272                 for (i = 0; i < size; ++i) {
273                         spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
274                         last[i] = ((const uint8_t *)item->last)[i] & mask[i];
275                 }
276                 ret = memcmp(spec, last, size);
277                 if (ret != 0)
278                         return rte_flow_error_set(error, ENOTSUP,
279                                                   RTE_FLOW_ERROR_TYPE_ITEM,
280                                                   item,
281                                                   "range is not supported");
282         }
283         return 0;
284 }
285
286 /**
287  * Add a verbs specification into @p flow.
288  *
289  * @param[in, out] flow
290  *   Pointer to flow structure.
291  * @param[in] src
292  *   Specification to copy into the flow.
293  * @param[in] size
294  *   Size in bytes of the specification to copy.
295  */
296 static void
297 mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
298 {
299         if (flow->verbs.specs) {
300                 void *dst;
301
302                 dst = (void *)(flow->verbs.specs + flow->verbs.size);
303                 memcpy(dst, src, size);
304                 ++flow->verbs.attr->num_of_specs;
305         }
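        /*
         * Always account for the specification size, even when no buffer is
         * attached, so callers can compute the required space.
         */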
306         flow->verbs.size += size;
307 }
308
309 /**
310  * Convert the @p item into a Verbs specification after ensuring the NIC
311  * will understand and process it correctly.
312  * If the necessary size for the conversion is greater than the @p flow_size,
313  * nothing is written in @p flow; the validation is still performed.
314  *
315  * @param[in] item
316  *   Item specification.
317  * @param[in, out] flow
318  *   Pointer to flow structure.
319  * @param[in] flow_size
320  *   Size in bytes of the available space in @p flow, if too small, nothing is
321  *   written.
322  * @param[out] error
323  *   Pointer to error structure.
324  *
325  * @return
326  *   On success, the number of bytes consumed/necessary; if the returned value
327  *   is less than or equal to @p flow_size, the @p item has been fully converted,
328  *   otherwise another call with the returned memory size should be done.
329  *   On error, a negative errno value is returned and rte_errno is set.
330  */
331 static int
332 mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
333                    const size_t flow_size, struct rte_flow_error *error)
334 {
335         const struct rte_flow_item_eth *spec = item->spec;
336         const struct rte_flow_item_eth *mask = item->mask;
337         const struct rte_flow_item_eth nic_mask = {
338                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
339                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
340                 .type = RTE_BE16(0xffff),
341         };
342         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
343         struct ibv_flow_spec_eth eth = {
344                 .type = IBV_FLOW_SPEC_ETH,
345                 .size = size,
346         };
347         int ret;
348
349         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
350                 return rte_flow_error_set(error, ENOTSUP,
351                                           RTE_FLOW_ERROR_TYPE_ITEM,
352                                           item,
353                                           "L2 layers already configured");
354         if (!mask)
355                 mask = &rte_flow_item_eth_mask;
356         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
357                                         (const uint8_t *)&nic_mask,
358                                         sizeof(struct rte_flow_item_eth),
359                                         error);
360         if (ret)
361                 return ret;
362         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
363         if (size > flow_size)
364                 return size;
365         if (spec) {
366                 unsigned int i;
367
368                 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
369                 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
370                 eth.val.ether_type = spec->type;
371                 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
372                 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
373                 eth.mask.ether_type = mask->type;
374                 /* Remove unwanted bits from values. */
375                 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
376                         eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
377                         eth.val.src_mac[i] &= eth.mask.src_mac[i];
378                 }
379                 eth.val.ether_type &= eth.mask.ether_type;
380         }
381         mlx5_flow_spec_verbs_add(flow, &eth, size);
382         return size;
383 }
384
385 /**
386  * Update the VLAN tag in the Verbs Ethernet specification.
387  *
388  * @param[in, out] attr
389  *   Pointer to Verbs attributes structure.
390  * @param[in] eth
391  *   Verbs structure containing the VLAN information to copy.
392  */
393 static void
394 mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
395                            struct ibv_flow_spec_eth *eth)
396 {
397         unsigned int i;
398         enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
399         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
400                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
401
402         for (i = 0; i != attr->num_of_specs; ++i) {
403                 if (hdr->type == search) {
404                         struct ibv_flow_spec_eth *e =
405                                 (struct ibv_flow_spec_eth *)hdr;
406
407                         e->val.vlan_tag = eth->val.vlan_tag;
408                         e->mask.vlan_tag = eth->mask.vlan_tag;
409                         e->val.ether_type = eth->val.ether_type;
410                         e->mask.ether_type = eth->mask.ether_type;
411                         break;
412                 }
413                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
414         }
415 }
416
417 /**
418  * Convert the @p item into a Verbs specification (or update the Ethernet
419  * specification already present in @p flow) after ensuring the NIC will
420  * understand and process it correctly.
421  * If the necessary size for the conversion is greater than the @p flow_size,
422  * nothing is written in @p flow; the validation is still performed.
423  *
424  * @param[in] item
425  *   Item specification.
426  * @param[in, out] flow
427  *   Pointer to flow structure.
428  * @param[in] flow_size
429  *   Size in bytes of the available space in @p flow, if too small, nothing is
430  *   written.
431  * @param[out] error
432  *   Pointer to error structure.
433  *
434  * @return
435  *   On success, the number of bytes consumed/necessary; if the returned value
436  *   is less than or equal to @p flow_size, the @p item has been fully converted,
437  *   otherwise another call with the returned memory size should be done.
438  *   On error, a negative errno value is returned and rte_errno is set.
439  */
440 static int
441 mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
442                     const size_t flow_size, struct rte_flow_error *error)
443 {
444         const struct rte_flow_item_vlan *spec = item->spec;
445         const struct rte_flow_item_vlan *mask = item->mask;
446         const struct rte_flow_item_vlan nic_mask = {
447                 .tci = RTE_BE16(0x0fff),
448                 .inner_type = RTE_BE16(0xffff),
449         };
450         unsigned int size = sizeof(struct ibv_flow_spec_eth);
451         struct ibv_flow_spec_eth eth = {
452                 .type = IBV_FLOW_SPEC_ETH,
453                 .size = size,
454         };
455         int ret;
456         const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
457                         MLX5_FLOW_LAYER_OUTER_L4;
458         const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
459         const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
460
461         if (flow->layers & vlanm)
462                 return rte_flow_error_set(error, ENOTSUP,
463                                           RTE_FLOW_ERROR_TYPE_ITEM,
464                                           item,
465                                           "VLAN layer already configured");
466         else if ((flow->layers & l34m) != 0)
467                 return rte_flow_error_set(error, ENOTSUP,
468                                           RTE_FLOW_ERROR_TYPE_ITEM,
469                                           item,
470                                           "L2 layer cannot follow L3/L4 layer");
471         if (!mask)
472                 mask = &rte_flow_item_vlan_mask;
473         ret = mlx5_flow_item_acceptable
474                 (item, (const uint8_t *)mask,
475                  (const uint8_t *)&nic_mask,
476                  sizeof(struct rte_flow_item_vlan), error);
477         if (ret)
478                 return ret;
479         if (spec) {
480                 eth.val.vlan_tag = spec->tci;
481                 eth.mask.vlan_tag = mask->tci;
482                 eth.val.vlan_tag &= eth.mask.vlan_tag;
483                 eth.val.ether_type = spec->inner_type;
484                 eth.mask.ether_type = mask->inner_type;
485                 eth.val.ether_type &= eth.mask.ether_type;
486         }
487         /*
488          * From verbs perspective an empty VLAN is equivalent
489          * to a packet without VLAN layer.
490          */
491         if (!eth.mask.vlan_tag)
492                 return rte_flow_error_set(error, EINVAL,
493                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
494                                           item->spec,
495                                           "VLAN cannot be empty");
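        /*
         * When no Ethernet item precedes the VLAN, add a new Ethernet
         * specification carrying the VLAN fields; otherwise only update the
         * Ethernet specification already present in the Verbs attribute.
         */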
496         if (!(flow->layers & l2m)) {
497                 if (size <= flow_size)
498                         mlx5_flow_spec_verbs_add(flow, &eth, size);
499         } else {
500                 if (flow->verbs.attr)
501                         mlx5_flow_item_vlan_update(flow->verbs.attr, &eth);
502                 size = 0; /* Only an update is done in eth specification. */
503         }
504         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 |
505                 MLX5_FLOW_LAYER_OUTER_VLAN;
506         return size;
507 }
508
509 /**
510  * Convert the @p item into a Verbs specification after ensuring the NIC
511  * will understand and process it correctly.
512  * If the necessary size for the conversion is greater than the @p flow_size,
513  * nothing is written in @p flow; the validation is still performed.
514  *
515  * @param[in] item
516  *   Item specification.
517  * @param[in, out] flow
518  *   Pointer to flow structure.
519  * @param[in] flow_size
520  *   Size in bytes of the available space in @p flow, if too small, nothing is
521  *   written.
522  * @param[out] error
523  *   Pointer to error structure.
524  *
525  * @return
526  *   On success, the number of bytes consumed/necessary; if the returned value
527  *   is less than or equal to @p flow_size, the @p item has been fully converted,
528  *   otherwise another call with the returned memory size should be done.
529  *   On error, a negative errno value is returned and rte_errno is set.
530  */
531 static int
532 mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
533                     const size_t flow_size, struct rte_flow_error *error)
534 {
535         const struct rte_flow_item_ipv4 *spec = item->spec;
536         const struct rte_flow_item_ipv4 *mask = item->mask;
537         const struct rte_flow_item_ipv4 nic_mask = {
538                 .hdr = {
539                         .src_addr = RTE_BE32(0xffffffff),
540                         .dst_addr = RTE_BE32(0xffffffff),
541                         .type_of_service = 0xff,
542                         .next_proto_id = 0xff,
543                 },
544         };
545         unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
546         struct ibv_flow_spec_ipv4_ext ipv4 = {
547                 .type = IBV_FLOW_SPEC_IPV4_EXT,
548                 .size = size,
549         };
550         int ret;
551
552         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
553                 return rte_flow_error_set(error, ENOTSUP,
554                                           RTE_FLOW_ERROR_TYPE_ITEM,
555                                           item,
556                                           "multiple L3 layers not supported");
557         else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
558                 return rte_flow_error_set(error, ENOTSUP,
559                                           RTE_FLOW_ERROR_TYPE_ITEM,
560                                           item,
561                                           "L3 cannot follow an L4 layer.");
562         if (!mask)
563                 mask = &rte_flow_item_ipv4_mask;
564         ret = mlx5_flow_item_acceptable
565                 (item, (const uint8_t *)mask,
566                  (const uint8_t *)&nic_mask,
567                  sizeof(struct rte_flow_item_ipv4), error);
568         if (ret < 0)
569                 return ret;
570         flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
571         if (size > flow_size)
572                 return size;
573         if (spec) {
574                 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
575                         .src_ip = spec->hdr.src_addr,
576                         .dst_ip = spec->hdr.dst_addr,
577                         .proto = spec->hdr.next_proto_id,
578                         .tos = spec->hdr.type_of_service,
579                 };
580                 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
581                         .src_ip = mask->hdr.src_addr,
582                         .dst_ip = mask->hdr.dst_addr,
583                         .proto = mask->hdr.next_proto_id,
584                         .tos = mask->hdr.type_of_service,
585                 };
586                 /* Remove unwanted bits from values. */
587                 ipv4.val.src_ip &= ipv4.mask.src_ip;
588                 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
589                 ipv4.val.proto &= ipv4.mask.proto;
590                 ipv4.val.tos &= ipv4.mask.tos;
591         }
592         mlx5_flow_spec_verbs_add(flow, &ipv4, size);
593         return size;
594 }
595
596 /**
597  * Convert the @p item into a Verbs specification after ensuring the NIC
598  * will understand and process it correctly.
599  * If the necessary size for the conversion is greater than the @p flow_size,
600  * nothing is written in @p flow; the validation is still performed.
601  *
602  * @param[in] item
603  *   Item specification.
604  * @param[in, out] flow
605  *   Pointer to flow structure.
606  * @param[in] flow_size
607  *   Size in bytes of the available space in @p flow, if too small, nothing is
608  *   written.
609  * @param[out] error
610  *   Pointer to error structure.
611  *
612  * @return
613  *   On success, the number of bytes consumed/necessary; if the returned value
614  *   is less than or equal to @p flow_size, the @p item has been fully converted,
615  *   otherwise another call with the returned memory size should be done.
616  *   On error, a negative errno value is returned and rte_errno is set.
617  */
618 static int
619 mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
620                     const size_t flow_size, struct rte_flow_error *error)
621 {
622         const struct rte_flow_item_ipv6 *spec = item->spec;
623         const struct rte_flow_item_ipv6 *mask = item->mask;
624         const struct rte_flow_item_ipv6 nic_mask = {
625                 .hdr = {
626                         .src_addr =
627                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
628                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
629                         .dst_addr =
630                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
631                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
632                         .vtc_flow = RTE_BE32(0xffffffff),
633                         .proto = 0xff,
634                         .hop_limits = 0xff,
635                 },
636         };
637         unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
638         struct ibv_flow_spec_ipv6 ipv6 = {
639                 .type = IBV_FLOW_SPEC_IPV6,
640                 .size = size,
641         };
642         int ret;
643
644         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
645                 return rte_flow_error_set(error, ENOTSUP,
646                                           RTE_FLOW_ERROR_TYPE_ITEM,
647                                           item,
648                                           "multiple L3 layers not supported");
649         else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
650                 return rte_flow_error_set(error, ENOTSUP,
651                                           RTE_FLOW_ERROR_TYPE_ITEM,
652                                           item,
653                                           "L3 cannot follow an L4 layer.");
654         if (!mask)
655                 mask = &rte_flow_item_ipv6_mask;
656         ret = mlx5_flow_item_acceptable
657                 (item, (const uint8_t *)mask,
658                  (const uint8_t *)&nic_mask,
659                  sizeof(struct rte_flow_item_ipv6), error);
660         if (ret < 0)
661                 return ret;
662         flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
663         if (size > flow_size)
664                 return size;
665         if (spec) {
666                 unsigned int i;
667                 uint32_t vtc_flow_val;
668                 uint32_t vtc_flow_mask;
669
670                 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
671                        RTE_DIM(ipv6.val.src_ip));
672                 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
673                        RTE_DIM(ipv6.val.dst_ip));
674                 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
675                        RTE_DIM(ipv6.mask.src_ip));
676                 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
677                        RTE_DIM(ipv6.mask.dst_ip));
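                /*
                 * The IPv6 vtc_flow field packs version, traffic class and
                 * flow label; split it into the Verbs traffic_class and
                 * flow_label fields.
                 */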
678                 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
679                 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
680                 ipv6.val.flow_label =
681                         rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
682                                          IPV6_HDR_FL_SHIFT);
683                 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
684                                          IPV6_HDR_TC_SHIFT;
685                 ipv6.val.next_hdr = spec->hdr.proto;
686                 ipv6.val.hop_limit = spec->hdr.hop_limits;
687                 ipv6.mask.flow_label =
688                         rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
689                                          IPV6_HDR_FL_SHIFT);
690                 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
691                                           IPV6_HDR_TC_SHIFT;
692                 ipv6.mask.next_hdr = mask->hdr.proto;
693                 ipv6.mask.hop_limit = mask->hdr.hop_limits;
694                 /* Remove unwanted bits from values. */
695                 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
696                         ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
697                         ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
698                 }
699                 ipv6.val.flow_label &= ipv6.mask.flow_label;
700                 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
701                 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
702                 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
703         }
704         mlx5_flow_spec_verbs_add(flow, &ipv6, size);
705         return size;
706 }
707
708 /**
709  * Convert the @p pattern into a Verbs specifications after ensuring the NIC
710  * will understand and process it correctly.
711  * The conversion is performed item by item; each of them is written into
712  * the @p flow if its size is less than or equal to @p flow_size.
713  * Validation and memory consumption computation are still performed until the
714  * end of @p pattern, unless an error is encountered.
715  *
716  * @param[in] pattern
717  *   Flow pattern.
718  * @param[in, out] flow
719  *   Pointer to the rte_flow structure.
720  * @param[in] flow_size
721  *   Size in bytes of the available space in @p flow, if too small some
722  *   garbage may be present.
723  * @param[out] error
724  *   Pointer to error structure.
725  *
726  * @return
727  *   On success, the number of bytes consumed/necessary; if the returned value
728  *   is less than or equal to @p flow_size, the @p pattern has been fully
729  *   converted, otherwise another call with the returned memory size should
730  *   be done.
731  *   On error, a negative errno value is returned and rte_errno is set.
732  */
733 static int
734 mlx5_flow_items(const struct rte_flow_item pattern[],
735                 struct rte_flow *flow, const size_t flow_size,
736                 struct rte_flow_error *error)
737 {
738         int remain = flow_size;
739         size_t size = 0;
740
741         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
742                 int ret = 0;
743
744                 switch (pattern->type) {
745                 case RTE_FLOW_ITEM_TYPE_VOID:
746                         break;
747                 case RTE_FLOW_ITEM_TYPE_ETH:
748                         ret = mlx5_flow_item_eth(pattern, flow, remain, error);
749                         break;
750                 case RTE_FLOW_ITEM_TYPE_VLAN:
751                         ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
752                         break;
753                 case RTE_FLOW_ITEM_TYPE_IPV4:
754                         ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
755                         break;
756                 case RTE_FLOW_ITEM_TYPE_IPV6:
757                         ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
758                         break;
759                 default:
760                         return rte_flow_error_set(error, ENOTSUP,
761                                                   RTE_FLOW_ERROR_TYPE_ITEM,
762                                                   pattern,
763                                                   "item not supported");
764                 }
765                 if (ret < 0)
766                         return ret;
767                 if (remain > ret)
768                         remain -= ret;
769                 else
770                         remain = 0;
771                 size += ret;
772         }
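        /*
         * The pattern does not contain any layer item: default to an empty
         * Ethernet item so a wildcard L2 specification is generated.
         */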
773         if (!flow->layers) {
774                 const struct rte_flow_item item = {
775                         .type = RTE_FLOW_ITEM_TYPE_ETH,
776                 };
777
778                 return mlx5_flow_item_eth(&item, flow, flow_size, error);
779         }
780         return size;
781 }
782
783 /**
784  * Convert the @p action into a Verbs specification after ensuring the NIC
785  * will understand and process it correctly.
786  * If the necessary size for the conversion is greater than the @p flow_size,
787  * nothing is written in @p flow; the validation is still performed.
788  *
789  * @param[in] action
790  *   Action configuration.
791  * @param[in, out] flow
792  *   Pointer to flow structure.
793  * @param[in] flow_size
794  *   Size in bytes of the available space in @p flow, if too small, nothing is
795  *   written.
796  * @param[out] error
797  *   Pointer to error structure.
798  *
799  * @return
800  *   On success, the number of bytes consumed/necessary; if the returned value
801  *   is less than or equal to @p flow_size, the @p action has been fully
802  *   converted, otherwise another call with the returned memory size should
803  *   be done.
804  *   On error, a negative errno value is returned and rte_errno is set.
805  */
806 static int
807 mlx5_flow_action_drop(const struct rte_flow_action *action,
808                       struct rte_flow *flow, const size_t flow_size,
809                       struct rte_flow_error *error)
810 {
811         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
812         struct ibv_flow_spec_action_drop drop = {
813                         .type = IBV_FLOW_SPEC_ACTION_DROP,
814                         .size = size,
815         };
816
817         if (flow->fate)
818                 return rte_flow_error_set(error, ENOTSUP,
819                                           RTE_FLOW_ERROR_TYPE_ACTION,
820                                           action,
821                                           "multiple fate actions are not"
822                                           " supported");
823         if (size <= flow_size)
824                 mlx5_flow_spec_verbs_add(flow, &drop, size);
825         flow->fate |= MLX5_FLOW_FATE_DROP;
826         return size;
827 }
828
829 /**
830  * Convert the @p action into @p flow after ensuring the NIC will understand
831  * and process it correctly.
832  *
833  * @param[in] dev
834  *   Pointer to Ethernet device structure.
835  * @param[in] action
836  *   Action configuration.
837  * @param[in, out] flow
838  *   Pointer to flow structure.
839  * @param[out] error
840  *   Pointer to error structure.
841  *
842  * @return
843  *   0 on success, a negative errno value otherwise and rte_errno is set.
844  */
845 static int
846 mlx5_flow_action_queue(struct rte_eth_dev *dev,
847                        const struct rte_flow_action *action,
848                        struct rte_flow *flow,
849                        struct rte_flow_error *error)
850 {
851         struct priv *priv = dev->data->dev_private;
852         const struct rte_flow_action_queue *queue = action->conf;
853
854         if (flow->fate)
855                 return rte_flow_error_set(error, ENOTSUP,
856                                           RTE_FLOW_ERROR_TYPE_ACTION,
857                                           action,
858                                           "multiple fate actions are not"
859                                           " supported");
860         if (queue->index >= priv->rxqs_n)
861                 return rte_flow_error_set(error, EINVAL,
862                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
863                                           &queue->index,
864                                           "queue index out of range");
865         if (!(*priv->rxqs)[queue->index])
866                 return rte_flow_error_set(error, EINVAL,
867                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
868                                           &queue->index,
869                                           "queue is not configured");
870         flow->queue = queue->index;
871         flow->fate |= MLX5_FLOW_FATE_QUEUE;
872         return 0;
873 }
874
875 /**
876  * Convert the @p actions into @p flow after ensuring the NIC will understand
877  * and process it correctly.
878  * The conversion is performed action by action; each of them is written into
879  * the @p flow if its size is less than or equal to @p flow_size.
880  * Validation and memory consumption computation are still performed until the
881  * end of @p actions, unless an error is encountered.
882  *
883  * @param[in] dev
884  *   Pointer to Ethernet device structure.
885  * @param[in] actions
886  *   Pointer to flow actions array.
887  * @param[in, out] flow
888  *   Pointer to the rte_flow structure.
889  * @param[in] flow_size
890  *   Size in bytes of the available space in @p flow, if too small some
891  *   garbage may be present.
892  * @param[out] error
893  *   Pointer to error structure.
894  *
895  * @return
896  *   On success, the number of bytes consumed/necessary; if the returned value
897  *   is less than or equal to @p flow_size, the @p actions have been fully
898  *   converted, otherwise another call with the returned memory size should
899  *   be done.
900  *   On error, a negative errno value is returned and rte_errno is set.
901  */
902 static int
903 mlx5_flow_actions(struct rte_eth_dev *dev,
904                   const struct rte_flow_action actions[],
905                   struct rte_flow *flow, const size_t flow_size,
906                   struct rte_flow_error *error)
907 {
908         size_t size = 0;
909         int remain = flow_size;
910         int ret = 0;
911
912         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
913                 switch (actions->type) {
914                 case RTE_FLOW_ACTION_TYPE_VOID:
915                         break;
916                 case RTE_FLOW_ACTION_TYPE_DROP:
917                         ret = mlx5_flow_action_drop(actions, flow, remain,
918                                                     error);
919                         break;
920                 case RTE_FLOW_ACTION_TYPE_QUEUE:
921                         ret = mlx5_flow_action_queue(dev, actions, flow, error);
922                         break;
923                 default:
924                         return rte_flow_error_set(error, ENOTSUP,
925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
926                                                   actions,
927                                                   "action not supported");
928                 }
929                 if (ret < 0)
930                         return ret;
931                 if (remain > ret)
932                         remain -= ret;
933                 else
934                         remain = 0;
935                 size += ret;
936         }
937         if (!flow->fate)
938                 return rte_flow_error_set(error, ENOTSUP,
939                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
940                                           NULL,
941                                           "no fate action found");
942         return size;
943 }
944
945 /**
946  * Convert the @p attributes, @p pattern and @p actions into a flow for the NIC
947  * after ensuring the NIC will understand and process it correctly.
948  * The conversion is performed item per item and action per action; each of
949  * them is written into the @p flow if its size is less than or equal to @p
950  * flow_size.
951  * Validation and memory consumption computation are still performed until the
952  * end, unless an error is encountered.
953  *
954  * @param[in] dev
955  *   Pointer to Ethernet device.
956  * @param[in, out] flow
957  *   Pointer to flow structure.
958  * @param[in] flow_size
959  *   Size in bytes of the available space in @p flow, if too small some
960  *   garbage may be present.
961  * @param[in] attributes
962  *   Flow rule attributes.
963  * @param[in] pattern
964  *   Pattern specification (list terminated by the END pattern item).
965  * @param[in] actions
966  *   Associated actions (list terminated by the END action).
967  * @param[out] error
968  *   Perform verbose error reporting if not NULL.
969  *
970  * @return
971  *   On success, the number of bytes consumed/necessary; if the returned value
972  *   is less than or equal to @p flow_size, the flow has been fully converted and
973  *   can be applied, otherwise another call with the returned memory size
974  *   should be done.
975  *   On error, a negative errno value is returned and rte_errno is set.
976  */
977 static int
978 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
979                 const size_t flow_size,
980                 const struct rte_flow_attr *attributes,
981                 const struct rte_flow_item pattern[],
982                 const struct rte_flow_action actions[],
983                 struct rte_flow_error *error)
984 {
985         struct rte_flow local_flow = { .layers = 0, };
986         size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
987         int remain = (flow_size > size) ? flow_size - size : 0;
988         int ret;
989
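        /*
         * Without enough room for the flow and its Verbs attribute, work on a
         * local dummy flow: only validation and size computation are performed.
         */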
990         if (!remain)
991                 flow = &local_flow;
992         ret = mlx5_flow_attributes(dev, attributes, flow, error);
993         if (ret < 0)
994                 return ret;
995         ret = mlx5_flow_items(pattern, flow, remain, error);
996         if (ret < 0)
997                 return ret;
998         size += ret;
999         remain = (flow_size > size) ? flow_size - size : 0;
1000         ret = mlx5_flow_actions(dev, actions, flow, remain, error);
1001         if (ret < 0)
1002                 return ret;
1003         size += ret;
1004         if (size <= flow_size)
1005                 flow->verbs.attr->priority = flow->attributes.priority;
1006         return size;
1007 }
1008
1009 /**
1010  * Validate a flow supported by the NIC.
1011  *
1012  * @see rte_flow_validate()
1013  * @see rte_flow_ops
1014  */
1015 int
1016 mlx5_flow_validate(struct rte_eth_dev *dev,
1017                    const struct rte_flow_attr *attr,
1018                    const struct rte_flow_item items[],
1019                    const struct rte_flow_action actions[],
1020                    struct rte_flow_error *error)
1021 {
1022         int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
1023
1024         if (ret < 0)
1025                 return ret;
1026         return 0;
1027 }
1028
1029 /**
1030  * Remove the flow.
1031  *
1032  * @param[in] dev
1033  *   Pointer to Ethernet device.
1034  * @param[in, out] flow
1035  *   Pointer to flow structure.
1036  */
1037 static void
1038 mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1039 {
1040         if (flow->fate & MLX5_FLOW_FATE_DROP) {
1041                 if (flow->verbs.flow) {
1042                         claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
1043                         flow->verbs.flow = NULL;
1044                 }
1045         }
1046         if (flow->verbs.hrxq) {
1047                 if (flow->fate & MLX5_FLOW_FATE_DROP)
1048                         mlx5_hrxq_drop_release(dev);
1049                 else if (flow->fate & MLX5_FLOW_FATE_QUEUE)
1050                         mlx5_hrxq_release(dev, flow->verbs.hrxq);
1051                 flow->verbs.hrxq = NULL;
1052         }
1053 }
1054
1055 /**
1056  * Apply the flow.
1057  *
1058  * @param[in] dev
1059  *   Pointer to Ethernet device structure.
1060  * @param[in, out] flow
1061  *   Pointer to flow structure.
1062  * @param[out] error
1063  *   Pointer to error structure.
1064  *
1065  * @return
1066  *   0 on success, a negative errno value otherwise and rte_errno is set.
1067  */
1068 static int
1069 mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1070                 struct rte_flow_error *error)
1071 {
1072         if (flow->fate & MLX5_FLOW_FATE_DROP) {
1073                 flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
1074                 if (!flow->verbs.hrxq)
1075                         return rte_flow_error_set
1076                                 (error, errno,
1077                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1078                                  NULL,
1079                                  "cannot allocate Drop queue");
1080         } else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {
1081                 struct mlx5_hrxq *hrxq;
1082
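                /*
                 * Reuse an existing hash Rx queue object matching the target
                 * queue if possible, otherwise create a new one.
                 */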
1083                 hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
1084                                      rss_hash_default_key_len, 0,
1085                                      &flow->queue, 1, 0, 0);
1086                 if (!hrxq)
1087                         hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
1088                                              rss_hash_default_key_len, 0,
1089                                              &flow->queue, 1, 0, 0);
1090                 if (!hrxq)
1091                         return rte_flow_error_set(error, rte_errno,
1092                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1093                                         NULL,
1094                                         "cannot create flow");
1095                 flow->verbs.hrxq = hrxq;
1096         }
1097         flow->verbs.flow =
1098                 mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
1099         if (!flow->verbs.flow) {
1100                 if (flow->fate & MLX5_FLOW_FATE_DROP)
1101                         mlx5_hrxq_drop_release(dev);
1102                 else
1103                         mlx5_hrxq_release(dev, flow->verbs.hrxq);
1104                 flow->verbs.hrxq = NULL;
1105                 return rte_flow_error_set(error, errno,
1106                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1107                                           NULL,
1108                                           "kernel module refuses to create"
1109                                           " flow");
1110         }
1111         return 0;
1112 }
1113
1114 /**
1115  * Create a flow and add it to @p list.
1116  *
1117  * @param dev
1118  *   Pointer to Ethernet device.
1119  * @param list
1120  *   Pointer to a TAILQ flow list.
1121  * @param[in] attr
1122  *   Flow rule attributes.
1123  * @param[in] items
1124  *   Pattern specification (list terminated by the END pattern item).
1125  * @param[in] actions
1126  *   Associated actions (list terminated by the END action).
1127  * @param[out] error
1128  *   Perform verbose error reporting if not NULL.
1129  *
1130  * @return
1131  *   A flow on success, NULL otherwise and rte_errno is set.
1132  */
1133 static struct rte_flow *
1134 mlx5_flow_list_create(struct rte_eth_dev *dev,
1135                       struct mlx5_flows *list,
1136                       const struct rte_flow_attr *attr,
1137                       const struct rte_flow_item items[],
1138                       const struct rte_flow_action actions[],
1139                       struct rte_flow_error *error)
1140 {
1141         struct rte_flow *flow;
1142         size_t size;
1143         int ret;
1144
1145         ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
1146         if (ret < 0)
1147                 return NULL;
1148         size = ret;
1149         flow = rte_zmalloc(__func__, size, 0);
1150         if (!flow) {
1151                 rte_flow_error_set(error, ENOMEM,
1152                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1153                                    NULL,
1154                                    "cannot allocate memory");
1155                 return NULL;
1156         }
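        /*
         * The Verbs attribute and its specifications are laid out right after
         * the rte_flow structure inside the same allocation.
         */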
1157         flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
1158         flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
1159         ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
1160         if (ret < 0)
1161                 goto error;
1162         assert((size_t)ret == size);
1163         if (dev->data->dev_started) {
1164                 ret = mlx5_flow_apply(dev, flow, error);
1165                 if (ret < 0)
1166                         goto error;
1167         }
1168         TAILQ_INSERT_TAIL(list, flow, next);
1169         return flow;
1170 error:
1171         ret = rte_errno; /* Save rte_errno before cleanup. */
1172         mlx5_flow_remove(dev, flow);
1173         rte_free(flow);
1174         rte_errno = ret; /* Restore rte_errno. */
1175         return NULL;
1176 }
1177
1178 /**
1179  * Create a flow.
1180  *
1181  * @see rte_flow_create()
1182  * @see rte_flow_ops
1183  */
1184 struct rte_flow *
1185 mlx5_flow_create(struct rte_eth_dev *dev,
1186                  const struct rte_flow_attr *attr,
1187                  const struct rte_flow_item items[],
1188                  const struct rte_flow_action actions[],
1189                  struct rte_flow_error *error)
1190 {
1191         return mlx5_flow_list_create
1192                 (dev, &((struct priv *)dev->data->dev_private)->flows,
1193                  attr, items, actions, error);
1194 }
1195
1196 /**
1197  * Destroy a flow in a list.
1198  *
1199  * @param dev
1200  *   Pointer to Ethernet device.
1201  * @param list
1202  *   Pointer to a TAILQ flow list.
1203  * @param[in] flow
1204  *   Flow to destroy.
1205  */
1206 static void
1207 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
1208                        struct rte_flow *flow)
1209 {
1210         mlx5_flow_remove(dev, flow);
1211         TAILQ_REMOVE(list, flow, next);
1212         rte_free(flow);
1213 }
1214
1215 /**
1216  * Destroy all flows.
1217  *
1218  * @param dev
1219  *   Pointer to Ethernet device.
1220  * @param list
1221  *   Pointer to a TAILQ flow list.
1222  */
1223 void
1224 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
1225 {
1226         while (!TAILQ_EMPTY(list)) {
1227                 struct rte_flow *flow;
1228
1229                 flow = TAILQ_FIRST(list);
1230                 mlx5_flow_list_destroy(dev, list, flow);
1231         }
1232 }
1233
1234 /**
1235  * Remove all flows.
1236  *
1237  * @param dev
1238  *   Pointer to Ethernet device.
1239  * @param list
1240  *   Pointer to a TAILQ flow list.
1241  */
1242 void
1243 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
1244 {
1245         struct rte_flow *flow;
1246
1247         TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
1248                 mlx5_flow_remove(dev, flow);
1249 }
1250
1251 /**
1252  * Add all flows.
1253  *
1254  * @param dev
1255  *   Pointer to Ethernet device.
1256  * @param list
1257  *   Pointer to a TAILQ flow list.
1258  *
1259  * @return
1260  *   0 on success, a negative errno value otherwise and rte_errno is set.
1261  */
1262 int
1263 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
1264 {
1265         struct rte_flow *flow;
1266         struct rte_flow_error error;
1267         int ret = 0;
1268
1269         TAILQ_FOREACH(flow, list, next) {
1270                 ret = mlx5_flow_apply(dev, flow, &error);
1271                 if (ret < 0)
1272                         goto error;
1273         }
1274         return 0;
1275 error:
1276         ret = rte_errno; /* Save rte_errno before cleanup. */
1277         mlx5_flow_stop(dev, list);
1278         rte_errno = ret; /* Restore rte_errno. */
1279         return -rte_errno;
1280 }
1281
1282 /**
1283  * Verify the flow list is empty.
1284  *
1285  * @param dev
1286  *   Pointer to Ethernet device.
1287  *
1288  * @return the number of flows not released.
1289  */
1290 int
1291 mlx5_flow_verify(struct rte_eth_dev *dev)
1292 {
1293         struct priv *priv = dev->data->dev_private;
1294         struct rte_flow *flow;
1295         int ret = 0;
1296
1297         TAILQ_FOREACH(flow, &priv->flows, next) {
1298                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
1299                         dev->data->port_id, (void *)flow);
1300                 ++ret;
1301         }
1302         return ret;
1303 }
1304
1305 /**
1306  * Enable a control flow configured from the control plane.
1307  *
1308  * @param dev
1309  *   Pointer to Ethernet device.
1310  * @param eth_spec
1311  *   An Ethernet flow spec to apply.
1312  * @param eth_mask
1313  *   An Ethernet flow mask to apply.
1314  * @param vlan_spec
1315  *   A VLAN flow spec to apply.
1316  * @param vlan_mask
1317  *   A VLAN flow mask to apply.
1318  *
1319  * @return
1320  *   0 on success, a negative errno value otherwise and rte_errno is set.
1321  */
1322 int
1323 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
1324                     struct rte_flow_item_eth *eth_spec,
1325                     struct rte_flow_item_eth *eth_mask,
1326                     struct rte_flow_item_vlan *vlan_spec,
1327                     struct rte_flow_item_vlan *vlan_mask)
1328 {
1329         struct priv *priv = dev->data->dev_private;
1330         const struct rte_flow_attr attr = {
1331                 .ingress = 1,
1332                 .priority = priv->config.flow_prio - 1,
1333         };
1334         struct rte_flow_item items[] = {
1335                 {
1336                         .type = RTE_FLOW_ITEM_TYPE_ETH,
1337                         .spec = eth_spec,
1338                         .last = NULL,
1339                         .mask = eth_mask,
1340                 },
1341                 {
1342                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
1343                                 RTE_FLOW_ITEM_TYPE_END,
1344                         .spec = vlan_spec,
1345                         .last = NULL,
1346                         .mask = vlan_mask,
1347                 },
1348                 {
1349                         .type = RTE_FLOW_ITEM_TYPE_END,
1350                 },
1351         };
1352         uint16_t queue[priv->reta_idx_n];
1353         struct rte_flow_action_rss action_rss = {
1354                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
1355                 .level = 0,
1356                 .types = priv->rss_conf.rss_hf,
1357                 .key_len = priv->rss_conf.rss_key_len,
1358                 .queue_num = priv->reta_idx_n,
1359                 .key = priv->rss_conf.rss_key,
1360                 .queue = queue,
1361         };
1362         struct rte_flow_action actions[] = {
1363                 {
1364                         .type = RTE_FLOW_ACTION_TYPE_RSS,
1365                         .conf = &action_rss,
1366                 },
1367                 {
1368                         .type = RTE_FLOW_ACTION_TYPE_END,
1369                 },
1370         };
1371         struct rte_flow *flow;
1372         struct rte_flow_error error;
1373         unsigned int i;
1374
1375         if (!priv->reta_idx_n) {
1376                 rte_errno = EINVAL;
1377                 return -rte_errno;
1378         }
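        /* Spread the control flow over all queues of the indirection table. */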
1379         for (i = 0; i != priv->reta_idx_n; ++i)
1380                 queue[i] = (*priv->reta_idx)[i];
1381         flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
1382                                      actions, &error);
1383         if (!flow)
1384                 return -rte_errno;
1385         return 0;
1386 }
1387
1388 /**
1389  * Enable a control flow configured from the control plane.
1390  *
1391  * @param dev
1392  *   Pointer to Ethernet device.
1393  * @param eth_spec
1394  *   An Ethernet flow spec to apply.
1395  * @param eth_mask
1396  *   An Ethernet flow mask to apply.
1397  *
1398  * @return
1399  *   0 on success, a negative errno value otherwise and rte_errno is set.
1400  */
1401 int
1402 mlx5_ctrl_flow(struct rte_eth_dev *dev,
1403                struct rte_flow_item_eth *eth_spec,
1404                struct rte_flow_item_eth *eth_mask)
1405 {
1406         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
1407 }
1408
1409 /**
1410  * Destroy a flow.
1411  *
1412  * @see rte_flow_destroy()
1413  * @see rte_flow_ops
1414  */
1415 int
1416 mlx5_flow_destroy(struct rte_eth_dev *dev,
1417                   struct rte_flow *flow,
1418                   struct rte_flow_error *error __rte_unused)
1419 {
1420         struct priv *priv = dev->data->dev_private;
1421
1422         mlx5_flow_list_destroy(dev, &priv->flows, flow);
1423         return 0;
1424 }
1425
1426 /**
1427  * Destroy all flows.
1428  *
1429  * @see rte_flow_flush()
1430  * @see rte_flow_ops
1431  */
1432 int
1433 mlx5_flow_flush(struct rte_eth_dev *dev,
1434                 struct rte_flow_error *error __rte_unused)
1435 {
1436         struct priv *priv = dev->data->dev_private;
1437
1438         mlx5_flow_list_flush(dev, &priv->flows);
1439         return 0;
1440 }
1441
1442 /**
1443  * Enable or disable isolated mode.
1444  *
1445  * @see rte_flow_isolate()
1446  * @see rte_flow_ops
1447  */
1448 int
1449 mlx5_flow_isolate(struct rte_eth_dev *dev,
1450                   int enable,
1451                   struct rte_flow_error *error)
1452 {
1453         struct priv *priv = dev->data->dev_private;
1454
1455         if (dev->data->dev_started) {
1456                 rte_flow_error_set(error, EBUSY,
1457                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1458                                    NULL,
1459                                    "port must be stopped first");
1460                 return -rte_errno;
1461         }
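        /* Record the isolation state and switch to the matching device operations. */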
1462         priv->isolated = !!enable;
1463         if (enable)
1464                 dev->dev_ops = &mlx5_dev_ops_isolate;
1465         else
1466                 dev->dev_ops = &mlx5_dev_ops;
1467         return 0;
1468 }
1469
1470 /**
1471  * Convert a flow director filter to a generic flow.
1472  *
1473  * @param dev
1474  *   Pointer to Ethernet device.
1475  * @param fdir_filter
1476  *   Flow director filter to add.
1477  * @param attributes
1478  *   Generic flow parameters structure.
1479  *
1480  * @return
1481  *   0 on success, a negative errno value otherwise and rte_errno is set.
1482  */
1483 static int
1484 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
1485                          const struct rte_eth_fdir_filter *fdir_filter,
1486                          struct mlx5_fdir *attributes)
1487 {
1488         struct priv *priv = dev->data->dev_private;
1489         const struct rte_eth_fdir_input *input = &fdir_filter->input;
1490         const struct rte_eth_fdir_masks *mask =
1491                 &dev->data->dev_conf.fdir_conf.mask;
1492
1493         /* Validate queue number. */
1494         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
1495                 DRV_LOG(ERR, "port %u invalid queue number %d",
1496                         dev->data->port_id, fdir_filter->action.rx_queue);
1497                 rte_errno = EINVAL;
1498                 return -rte_errno;
1499         }
1500         attributes->attr.ingress = 1;
1501         attributes->items[0] = (struct rte_flow_item) {
1502                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1503                 .spec = &attributes->l2,
1504                 .mask = &attributes->l2_mask,
1505         };
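        /* Translate the filter behavior into a generic flow fate action. */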
1506         switch (fdir_filter->action.behavior) {
1507         case RTE_ETH_FDIR_ACCEPT:
1508                 attributes->actions[0] = (struct rte_flow_action){
1509                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
1510                         .conf = &attributes->queue,
1511                 };
1512                 break;
1513         case RTE_ETH_FDIR_REJECT:
1514                 attributes->actions[0] = (struct rte_flow_action){
1515                         .type = RTE_FLOW_ACTION_TYPE_DROP,
1516                 };
1517                 break;
1518         default:
1519                 DRV_LOG(ERR, "port %u invalid behavior %d",
1520                         dev->data->port_id,
1521                         fdir_filter->action.behavior);
1522                 rte_errno = ENOTSUP;
1523                 return -rte_errno;
1524         }
1525         attributes->queue.index = fdir_filter->action.rx_queue;
1526         /* Handle L3. */
1527         switch (fdir_filter->input.flow_type) {
1528         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1529         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1530         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1531                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
1532                         .src_addr = input->flow.ip4_flow.src_ip,
1533                         .dst_addr = input->flow.ip4_flow.dst_ip,
1534                         .time_to_live = input->flow.ip4_flow.ttl,
1535                         .type_of_service = input->flow.ip4_flow.tos,
1536                         .next_proto_id = input->flow.ip4_flow.proto,
1537                 };
1538                 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
1539                         .src_addr = mask->ipv4_mask.src_ip,
1540                         .dst_addr = mask->ipv4_mask.dst_ip,
1541                         .time_to_live = mask->ipv4_mask.ttl,
1542                         .type_of_service = mask->ipv4_mask.tos,
1543                         .next_proto_id = mask->ipv4_mask.proto,
1544                 };
1545                 attributes->items[1] = (struct rte_flow_item){
1546                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
1547                         .spec = &attributes->l3,
1548                         .mask = &attributes->l3_mask,
1549                 };
1550                 break;
1551         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1552         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1553         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1554                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
1555                         .hop_limits = input->flow.ipv6_flow.hop_limits,
1556                         .proto = input->flow.ipv6_flow.proto,
1557                 };
1558
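                /* IPv6 addresses are byte arrays, copy them explicitly. */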
1559                 memcpy(attributes->l3.ipv6.hdr.src_addr,
1560                        input->flow.ipv6_flow.src_ip,
1561                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
1562                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
1563                        input->flow.ipv6_flow.dst_ip,
1564                RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
1565                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
1566                        mask->ipv6_mask.src_ip,
1567                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
1568                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
1569                        mask->ipv6_mask.dst_ip,
1570                RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
1571                 attributes->items[1] = (struct rte_flow_item){
1572                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
1573                         .spec = &attributes->l3,
1574                         .mask = &attributes->l3_mask,
1575                 };
1576                 break;
1577         default:
1578                 DRV_LOG(ERR, "port %u invalid flow type %d",
1579                         dev->data->port_id, fdir_filter->input.flow_type);
1580                 rte_errno = ENOTSUP;
1581                 return -rte_errno;
1582         }
1583         /* Handle L4. */
1584         switch (fdir_filter->input.flow_type) {
1585         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1586                 attributes->l4.udp.hdr = (struct udp_hdr){
1587                         .src_port = input->flow.udp4_flow.src_port,
1588                         .dst_port = input->flow.udp4_flow.dst_port,
1589                 };
1590                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1591                         .src_port = mask->src_port_mask,
1592                         .dst_port = mask->dst_port_mask,
1593                 };
1594                 attributes->items[2] = (struct rte_flow_item){
1595                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1596                         .spec = &attributes->l4,
1597                         .mask = &attributes->l4_mask,
1598                 };
1599                 break;
1600         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1601                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1602                         .src_port = input->flow.tcp4_flow.src_port,
1603                         .dst_port = input->flow.tcp4_flow.dst_port,
1604                 };
1605                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1606                         .src_port = mask->src_port_mask,
1607                         .dst_port = mask->dst_port_mask,
1608                 };
1609                 attributes->items[2] = (struct rte_flow_item){
1610                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1611                         .spec = &attributes->l4,
1612                         .mask = &attributes->l4_mask,
1613                 };
1614                 break;
1615         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1616                 attributes->l4.udp.hdr = (struct udp_hdr){
1617                         .src_port = input->flow.udp6_flow.src_port,
1618                         .dst_port = input->flow.udp6_flow.dst_port,
1619                 };
1620                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1621                         .src_port = mask->src_port_mask,
1622                         .dst_port = mask->dst_port_mask,
1623                 };
1624                 attributes->items[2] = (struct rte_flow_item){
1625                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1626                         .spec = &attributes->l4,
1627                         .mask = &attributes->l4_mask,
1628                 };
1629                 break;
1630         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1631                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1632                         .src_port = input->flow.tcp6_flow.src_port,
1633                         .dst_port = input->flow.tcp6_flow.dst_port,
1634                 };
1635                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1636                         .src_port = mask->src_port_mask,
1637                         .dst_port = mask->dst_port_mask,
1638                 };
1639                 attributes->items[2] = (struct rte_flow_item){
1640                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1641                         .spec = &attributes->l4,
1642                         .mask = &attributes->l4_mask,
1643                 };
1644                 break;
1645         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1646         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1647                 break;
1648         default:
1649                 DRV_LOG(ERR, "port %u invalid flow type %d",
1650                         dev->data->port_id, fdir_filter->input.flow_type);
1651                 rte_errno = ENOTSUP;
1652                 return -rte_errno;
1653         }
1654         return 0;
1655 }
1656
1657 /**
1658  * Add a new flow director filter and store it in the list.
1659  *
1660  * @param dev
1661  *   Pointer to Ethernet device.
1662  * @param fdir_filter
1663  *   Flow director filter to add.
1664  *
1665  * @return
1666  *   0 on success, a negative errno value otherwise and rte_errno is set.
1667  */
1668 static int
1669 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
1670                      const struct rte_eth_fdir_filter *fdir_filter)
1671 {
1672         struct priv *priv = dev->data->dev_private;
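        /* A zeroed L2 spec and mask make the Ethernet item match any L2 header. */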
1673         struct mlx5_fdir attributes = {
1674                 .attr.group = 0,
1675                 .l2_mask = {
1676                         .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1677                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1678                         .type = 0,
1679                 },
1680         };
1681         struct rte_flow_error error;
1682         struct rte_flow *flow;
1683         int ret;
1684
1685         ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
1686         if (ret)
1687                 return ret;
1688         flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
1689                                      attributes.items, attributes.actions,
1690                                      &error);
1691         if (flow) {
1692                 DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
1693                         (void *)flow);
1694                 return 0;
1695         }
1696         return -rte_errno;
1697 }
1698
1699 /**
1700  * Delete a specific filter.
1701  *
1702  * @param dev
1703  *   Pointer to Ethernet device.
1704  * @param fdir_filter
1705  *   Filter to be deleted.
1706  *
1707  * @return
1708  *   0 on success, a negative errno value otherwise and rte_errno is set.
1709  */
1710 static int
1711 mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
1712                         const struct rte_eth_fdir_filter *fdir_filter
1713                         __rte_unused)
1714 {
1715         rte_errno = ENOTSUP;
1716         return -rte_errno;
1717 }
1718
1719 /**
1720  * Update a specific filter by deleting and re-adding it.
1721  *
1722  * @param dev
1723  *   Pointer to Ethernet device.
1724  * @param fdir_filter
1725  *   Filter to be updated.
1726  *
1727  * @return
1728  *   0 on success, a negative errno value otherwise and rte_errno is set.
1729  */
1730 static int
1731 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
1732                         const struct rte_eth_fdir_filter *fdir_filter)
1733 {
1734         int ret;
1735
1736         ret = mlx5_fdir_filter_delete(dev, fdir_filter);
1737         if (ret)
1738                 return ret;
1739         return mlx5_fdir_filter_add(dev, fdir_filter);
1740 }
1741
1742 /**
1743  * Flush all filters.
1744  *
1745  * @param dev
1746  *   Pointer to Ethernet device.
1747  */
1748 static void
1749 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
1750 {
1751         struct priv *priv = dev->data->dev_private;
1752
1753         mlx5_flow_list_flush(dev, &priv->flows);
1754 }
1755
1756 /**
1757  * Get flow director information.
1758  *
1759  * @param dev
1760  *   Pointer to Ethernet device.
1761  * @param[out] fdir_info
1762  *   Resulting flow director information.
1763  */
1764 static void
1765 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
1766 {
1767         struct rte_eth_fdir_masks *mask =
1768                 &dev->data->dev_conf.fdir_conf.mask;
1769
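        /* Report the configured masks only; flexible payload is not supported. */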
1770         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
1771         fdir_info->guarant_spc = 0;
1772         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
1773         fdir_info->max_flexpayload = 0;
1774         fdir_info->flow_types_mask[0] = 0;
1775         fdir_info->flex_payload_unit = 0;
1776         fdir_info->max_flex_payload_segment_num = 0;
1777         fdir_info->flex_payload_limit = 0;
1778         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
1779 }
1780
1781 /**
1782  * Handle flow director operations.
1783  *
1784  * @param dev
1785  *   Pointer to Ethernet device.
1786  * @param filter_op
1787  *   Operation to perform.
1788  * @param arg
1789  *   Pointer to operation-specific structure.
1790  *
1791  * @return
1792  *   0 on success, a negative errno value otherwise and rte_errno is set.
1793  */
1794 static int
1795 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1796                     void *arg)
1797 {
1798         enum rte_fdir_mode fdir_mode =
1799                 dev->data->dev_conf.fdir_conf.mode;
1800
1801         if (filter_op == RTE_ETH_FILTER_NOP)
1802                 return 0;
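        /* Only perfect filtering modes are supported. */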
1803         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
1804             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1805                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
1806                         dev->data->port_id, fdir_mode);
1807                 rte_errno = EINVAL;
1808                 return -rte_errno;
1809         }
1810         switch (filter_op) {
1811         case RTE_ETH_FILTER_ADD:
1812                 return mlx5_fdir_filter_add(dev, arg);
1813         case RTE_ETH_FILTER_UPDATE:
1814                 return mlx5_fdir_filter_update(dev, arg);
1815         case RTE_ETH_FILTER_DELETE:
1816                 return mlx5_fdir_filter_delete(dev, arg);
1817         case RTE_ETH_FILTER_FLUSH:
1818                 mlx5_fdir_filter_flush(dev);
1819                 break;
1820         case RTE_ETH_FILTER_INFO:
1821                 mlx5_fdir_info_get(dev, arg);
1822                 break;
1823         default:
1824                 DRV_LOG(DEBUG, "port %u unknown operation %u",
1825                         dev->data->port_id, filter_op);
1826                 rte_errno = EINVAL;
1827                 return -rte_errno;
1828         }
1829         return 0;
1830 }
1831
1832 /**
1833  * Manage filter operations.
1834  *
1835  * @param dev
1836  *   Pointer to Ethernet device structure.
1837  * @param filter_type
1838  *   Filter type.
1839  * @param filter_op
1840  *   Operation to perform.
1841  * @param arg
1842  *   Pointer to operation-specific structure.
1843  *
1844  * @return
1845  *   0 on success, a negative errno value otherwise and rte_errno is set.
1846  */
1847 int
1848 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1849                      enum rte_filter_type filter_type,
1850                      enum rte_filter_op filter_op,
1851                      void *arg)
1852 {
1853         switch (filter_type) {
1854         case RTE_ETH_FILTER_GENERIC:
1855                 if (filter_op != RTE_ETH_FILTER_GET) {
1856                         rte_errno = EINVAL;
1857                         return -rte_errno;
1858                 }
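                /* Hand back the generic rte_flow operations. */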
1859                 *(const void **)arg = &mlx5_flow_ops;
1860                 return 0;
1861         case RTE_ETH_FILTER_FDIR:
1862                 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
1863         default:
1864                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
1865                         dev->data->port_id, filter_type);
1866                 rte_errno = ENOTSUP;
1867                 return -rte_errno;
1868         }
1869         return 0;
1870 }