net/mlx5: support flow Ethernet item along with drop action
[dpdk.git] drivers/net/mlx5/mlx5_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <sys/queue.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_prm.h"
32 #include "mlx5_glue.h"
33
34 /* Dev ops structure defined in mlx5.c */
35 extern const struct eth_dev_ops mlx5_dev_ops;
36 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
37
38 /* Pattern Layer bits. */
39 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
40 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
41 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
42 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
43 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
44 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
45 /* Masks. */
46 #define MLX5_FLOW_LAYER_OUTER_L3 \
47         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
48 #define MLX5_FLOW_LAYER_OUTER_L4 \
49         (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
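
/*
 * Illustration: every pattern item successfully converted for a flow sets
 * one bit above in rte_flow.layers. A rule on ETH / IPV4 / UDP (once those
 * items are supported) would accumulate MLX5_FLOW_LAYER_OUTER_L2 |
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP, and the
 * grouped masks allow a single test such as
 * (flow->layers & MLX5_FLOW_LAYER_OUTER_L3) to cover both IP versions.
 */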
50
51 /* Actions that modify the fate of matching traffic. */
52 #define MLX5_FLOW_FATE_DROP (1u << 0)
53
54 /** Handles information leading to a drop fate. */
55 struct mlx5_flow_verbs {
56         unsigned int size; /**< Size of the attribute. */
57         struct {
58                 struct ibv_flow_attr *attr;
59                 /**< Pointer to the Verbs flow attributes. */
60                 uint8_t *specs; /**< Pointer to the specifications. */
61         };
62         struct ibv_flow *flow; /**< Verbs flow pointer. */
63         struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
64 };
65
66 /* Flow structure. */
67 struct rte_flow {
68         TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
69         struct rte_flow_attr attributes; /**< User flow attribute. */
70         uint32_t layers;
71         /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
72         uint32_t fate;
73         /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
74         struct mlx5_flow_verbs verbs; /* Verbs drop flow. */
75 };
76
77 static const struct rte_flow_ops mlx5_flow_ops = {
78         .validate = mlx5_flow_validate,
79         .create = mlx5_flow_create,
80         .destroy = mlx5_flow_destroy,
81         .flush = mlx5_flow_flush,
82         .isolate = mlx5_flow_isolate,
83 };
84
85 /* Convert FDIR request to Generic flow. */
86 struct mlx5_fdir {
87         struct rte_flow_attr attr;
88         struct rte_flow_action actions[2];
89         struct rte_flow_item items[4];
90         struct rte_flow_item_eth l2;
91         struct rte_flow_item_eth l2_mask;
92         union {
93                 struct rte_flow_item_ipv4 ipv4;
94                 struct rte_flow_item_ipv6 ipv6;
95         } l3;
96         union {
97                 struct rte_flow_item_ipv4 ipv4;
98                 struct rte_flow_item_ipv6 ipv6;
99         } l3_mask;
100         union {
101                 struct rte_flow_item_udp udp;
102                 struct rte_flow_item_tcp tcp;
103         } l4;
104         union {
105                 struct rte_flow_item_udp udp;
106                 struct rte_flow_item_tcp tcp;
107         } l4_mask;
108         struct rte_flow_action_queue queue;
109 };
110
111 /* Verbs specification header. */
112 struct ibv_spec_header {
113         enum ibv_flow_spec_type type;
114         uint16_t size;
115 };
116
117 /**
118  * Discover the maximum number of flow priorities available.
119  *
120  * @param[in] dev
121  *   Pointer to Ethernet device.
122  *
123  * @return
124  *   Number of supported flow priorities on success, a negative errno value
125  *   otherwise and rte_errno is set.
126  */
127 int
128 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
129 {
130         struct {
131                 struct ibv_flow_attr attr;
132                 struct ibv_flow_spec_eth eth;
133                 struct ibv_flow_spec_action_drop drop;
134         } flow_attr = {
135                 .attr = {
136                         .num_of_specs = 2,
137                 },
138                 .eth = {
139                         .type = IBV_FLOW_SPEC_ETH,
140                         .size = sizeof(struct ibv_flow_spec_eth),
141                 },
142                 .drop = {
143                         .size = sizeof(struct ibv_flow_spec_action_drop),
144                         .type = IBV_FLOW_SPEC_ACTION_DROP,
145                 },
146         };
147         struct ibv_flow *flow;
148         struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
149         uint16_t vprio[] = { 8, 16 };
150         int i;
151
152         if (!drop) {
153                 rte_errno = ENOTSUP;
154                 return -rte_errno;
155         }
156         for (i = 0; i != RTE_DIM(vprio); i++) {
157                 flow_attr.attr.priority = vprio[i] - 1;
158                 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
159                 if (!flow)
160                         break;
161                 claim_zero(mlx5_glue->destroy_flow(flow));
162         }
163         mlx5_hrxq_drop_release(dev);
        if (!i) {
                /* No priority was accepted, vprio[i - 1] would underflow. */
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
164         DRV_LOG(INFO, "port %u flow maximum priority: %d",
165                 dev->data->port_id, vprio[i - 1]);
166         return vprio[i - 1];
167 }
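
/*
 * Verbs offers no way to query the number of priorities directly, hence
 * the probing above: a drop rule is created at the highest priority of
 * each candidate range (8 and 16). On a device exposing 16 priorities both
 * rules are accepted and 16 is returned; a device limited to 8 rejects the
 * second rule. A minimal caller sketch, assuming the usual probe context
 * where the result feeds priv->config.flow_prio:
 *
 *	int prio = mlx5_flow_discover_priorities(dev);
 *
 *	if (prio < 0)
 *		return prio;
 *	priv->config.flow_prio = prio;
 */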
168
169 /**
170  * Verify the @p attributes will be correctly understood by the NIC and store
171  * them in the @p flow if everything is correct.
172  *
173  * @param[in] dev
174  *   Pointer to Ethernet device.
175  * @param[in] attributes
176  *   Pointer to flow attributes.
177  * @param[in, out] flow
178  *   Pointer to the rte_flow structure.
179  * @param[out] error
180  *   Pointer to error structure.
181  *
182  * @return
183  *   0 on success, a negative errno value otherwise and rte_errno is set.
184  */
185 static int
186 mlx5_flow_attributes(struct rte_eth_dev *dev,
187                      const struct rte_flow_attr *attributes,
188                      struct rte_flow *flow,
189                      struct rte_flow_error *error)
190 {
191         uint32_t priority_max =
192                 ((struct priv *)dev->data->dev_private)->config.flow_prio;
193
194         if (attributes->group)
195                 return rte_flow_error_set(error, ENOTSUP,
196                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
197                                           NULL,
198                                           "groups are not supported");
199         if (attributes->priority >= priority_max)
200                 return rte_flow_error_set(error, ENOTSUP,
201                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
202                                           NULL,
203                                           "priority out of range");
204         if (attributes->egress)
205                 return rte_flow_error_set(error, ENOTSUP,
206                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
207                                           NULL,
208                                           "egress is not supported");
209         if (attributes->transfer)
210                 return rte_flow_error_set(error, ENOTSUP,
211                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
212                                           NULL,
213                                           "transfer is not supported");
214         if (!attributes->ingress)
215                 return rte_flow_error_set(error, ENOTSUP,
216                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
217                                           NULL,
218                                           "ingress attribute is mandatory");
219         flow->attributes = *attributes;
220         return 0;
221 }
222
223 /**
224  * Verify the @p item specifications (spec, last, mask) are compatible with the
225  * NIC capabilities.
226  *
227  * @param[in] item
228  *   Item specification.
229  * @param[in] mask
230  *   @p item->mask or flow default bit-masks.
231  * @param[in] nic_mask
232  *   Bit-masks covering supported fields by the NIC to compare with user mask.
233  * @param[in] size
234  *   Size of the bit-masks in bytes.
235  * @param[out] error
236  *   Pointer to error structure.
237  *
238  * @return
239  *   0 on success, a negative errno value otherwise and rte_errno is set.
240  */
241 static int
242 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
243                           const uint8_t *mask,
244                           const uint8_t *nic_mask,
245                           unsigned int size,
246                           struct rte_flow_error *error)
247 {
248         unsigned int i;
249
250         assert(nic_mask);
251         for (i = 0; i < size; ++i)
252                 if ((nic_mask[i] | mask[i]) != nic_mask[i])
253                         return rte_flow_error_set(error, ENOTSUP,
254                                                   RTE_FLOW_ERROR_TYPE_ITEM,
255                                                   item,
256                                                   "mask enables unsupported"
257                                                   " bits");
258         if (!item->spec && (item->mask || item->last))
259                 return rte_flow_error_set(error, EINVAL,
260                                           RTE_FLOW_ERROR_TYPE_ITEM,
261                                           item,
262                                           "mask/last without a spec is not"
263                                           " supported");
264         if (item->spec && item->last) {
265                 uint8_t spec[size];
266                 uint8_t last[size];
268                 int ret;
269
270                 for (i = 0; i < size; ++i) {
271                         spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
272                         last[i] = ((const uint8_t *)item->last)[i] & mask[i];
273                 }
274                 ret = memcmp(spec, last, size);
275                 if (ret != 0)
276                         return rte_flow_error_set(error, ENOTSUP,
277                                                   RTE_FLOW_ERROR_TYPE_ITEM,
278                                                   item,
279                                                   "range is not supported");
280         }
281         return 0;
282 }
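
/*
 * The superset test above means a user mask may only enable bits the NIC
 * mask also enables. For a single byte, nic_mask 0xff accepts any user
 * value, while nic_mask 0x00 against user mask 0x01 gives
 * (0x00 | 0x01) != 0x00 and the item is rejected.
 */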
283
284 /**
285  * Add a verbs specification into @p flow.
286  *
287  * @param[in, out] flow
288  *   Pointer to flow structure.
289  * @param[in] src
290  *   Pointer to the specification to copy.
291  * @param[in] size
292  *   Size in bytes of the specification to copy.
293  */
294 static void
295 mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
296 {
297         if (flow->verbs.specs) {
298                 void *dst;
299
300                 dst = (void *)(flow->verbs.specs + flow->verbs.size);
301                 memcpy(dst, src, size);
302                 ++flow->verbs.attr->num_of_specs;
303         }
304         flow->verbs.size += size;
305 }
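
/*
 * This helper drives the two-pass conversion: during validation
 * flow->verbs.specs is NULL, so only flow->verbs.size is accumulated and
 * the caller learns how much room is required; on the second pass the
 * specification is appended after the ibv_flow_attr and num_of_specs is
 * incremented.
 */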
306
307 /**
308  * Convert the @p item into a Verbs specification after ensuring the NIC
309  * will understand and process it correctly.
310  * If the necessary size for the conversion is greater than the @p flow_size,
311  * nothing is written in @p flow; the validation is still performed.
312  *
313  * @param[in] item
314  *   Item specification.
315  * @param[in, out] flow
316  *   Pointer to flow structure.
317  * @param[in] flow_size
318  *   Size in bytes of the available space in @p flow; if too small, nothing is
319  *   written.
320  * @param[out] error
321  *   Pointer to error structure.
322  *
323  * @return
324  *   On success the number of bytes consumed/necessary. If the returned value
325  *   is less than or equal to @p flow_size, the @p item has been fully converted;
326  *   otherwise another call with this returned memory size should be done.
327  *   On error, a negative errno value is returned and rte_errno is set.
328  */
329 static int
330 mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
331                    const size_t flow_size, struct rte_flow_error *error)
332 {
333         const struct rte_flow_item_eth *spec = item->spec;
334         const struct rte_flow_item_eth *mask = item->mask;
335         const struct rte_flow_item_eth nic_mask = {
336                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
337                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
338                 .type = RTE_BE16(0xffff),
339         };
340         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
341         struct ibv_flow_spec_eth eth = {
342                 .type = IBV_FLOW_SPEC_ETH,
343                 .size = size,
344         };
345         int ret;
346
347         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
348                 return rte_flow_error_set(error, ENOTSUP,
349                                           RTE_FLOW_ERROR_TYPE_ITEM,
350                                           item,
351                                           "L2 layers already configured");
352         if (!mask)
353                 mask = &rte_flow_item_eth_mask;
354         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
355                                         (const uint8_t *)&nic_mask,
356                                         sizeof(struct rte_flow_item_eth),
357                                         error);
358         if (ret)
359                 return ret;
360         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
361         if (size > flow_size)
362                 return size;
363         if (spec) {
364                 unsigned int i;
365
366                 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
367                 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
368                 eth.val.ether_type = spec->type;
369                 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
370                 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
371                 eth.mask.ether_type = mask->type;
372                 /* Remove unwanted bits from values. */
373                 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
374                         eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
375                         eth.val.src_mac[i] &= eth.mask.src_mac[i];
376                 }
377                 eth.val.ether_type &= eth.mask.ether_type;
378         }
379         mlx5_flow_spec_verbs_add(flow, &eth, size);
380         return size;
381 }
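
/*
 * Sketch of the translation above, assuming the default Ethernet mask
 * (full destination/source MAC, EtherType ignored):
 *
 *	struct rte_flow_item_eth spec = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *	};
 *
 * produces an ibv_flow_spec_eth whose val fields hold spec & mask and
 * whose mask fields are copied verbatim, then appended through
 * mlx5_flow_spec_verbs_add().
 */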
382
383 /**
384  * Convert the @p pattern into Verbs specifications after ensuring the NIC
385  * will understand and process it correctly.
386  * The conversion is performed item per item, each of them is written into
387  * the @p flow if its size is less than or equal to @p flow_size.
388  * Validation and memory consumption computation are still performed until the
389  * end of @p pattern, unless an error is encountered.
390  *
391  * @param[in] pattern
392  *   Flow pattern.
393  * @param[in, out] flow
394  *   Pointer to the rte_flow structure.
395  * @param[in] flow_size
396  *   Size in bytes of the available space in @p flow; if too small, some
397  *   garbage may be present.
398  * @param[out] error
399  *   Pointer to error structure.
400  *
401  * @return
402  *   On success the number of bytes consumed/necessary. If the returned value
403  *   is less than or equal to @p flow_size, the @p pattern has been fully
404  *   converted; otherwise another call with this returned memory size should
405  *   be done.
406  *   On error, a negative errno value is returned and rte_errno is set.
407  */
408 static int
409 mlx5_flow_items(const struct rte_flow_item pattern[],
410                 struct rte_flow *flow, const size_t flow_size,
411                 struct rte_flow_error *error)
412 {
413         int remain = flow_size;
414         size_t size = 0;
415
416         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
417                 int ret = 0;
418
419                 switch (pattern->type) {
420                 case RTE_FLOW_ITEM_TYPE_VOID:
421                         break;
422                 case RTE_FLOW_ITEM_TYPE_ETH:
423                         ret = mlx5_flow_item_eth(pattern, flow, remain, error);
424                         break;
425                 default:
426                         return rte_flow_error_set(error, ENOTSUP,
427                                                   RTE_FLOW_ERROR_TYPE_ITEM,
428                                                   pattern,
429                                                   "item not supported");
430                 }
431                 if (ret < 0)
432                         return ret;
433                 if (remain > ret)
434                         remain -= ret;
435                 else
436                         remain = 0;
437                 size += ret;
438         }
439         if (!flow->layers) {
440                 const struct rte_flow_item item = {
441                         .type = RTE_FLOW_ITEM_TYPE_ETH,
442                 };
443
444                 return mlx5_flow_item_eth(&item, flow, flow_size, error);
445         }
446         return size;
447 }
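
/*
 * Note the fallback above: a pattern with no layer (a lone END item, or
 * only VOID items) is converted as an implicit ETH item, i.e. an
 * ibv_flow_spec_eth with all-zero value and mask matching any L2 frame, so
 * the resulting Verbs flow always carries at least one specification.
 */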
448
449 /**
450  * Convert the @p action into a Verbs specification after ensuring the NIC
451  * will understand and process it correctly.
452  * If the necessary size for the conversion is greater than the @p flow_size,
453  * nothing is written in @p flow; the validation is still performed.
454  *
455  * @param[in] action
456  *   Action configuration.
457  * @param[in, out] flow
458  *   Pointer to flow structure.
459  * @param[in] flow_size
460  *   Size in bytes of the available space in @p flow; if too small, nothing is
461  *   written.
462  * @param[out] error
463  *   Pointer to error structure.
464  *
465  * @return
466  *   On success the number of bytes consumed/necessary. If the returned value
467  *   is less than or equal to @p flow_size, the @p action has been fully
468  *   converted; otherwise another call with this returned memory size should
469  *   be done.
470  *   On error, a negative errno value is returned and rte_errno is set.
471  */
472 static int
473 mlx5_flow_action_drop(const struct rte_flow_action *action,
474                       struct rte_flow *flow, const size_t flow_size,
475                       struct rte_flow_error *error)
476 {
477         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
478         struct ibv_flow_spec_action_drop drop = {
479                         .type = IBV_FLOW_SPEC_ACTION_DROP,
480                         .size = size,
481         };
482
483         if (flow->fate)
484                 return rte_flow_error_set(error, ENOTSUP,
485                                           RTE_FLOW_ERROR_TYPE_ACTION,
486                                           action,
487                                           "multiple fate actions are not"
488                                           " supported");
489         if (size <= flow_size)
490                 mlx5_flow_spec_verbs_add(flow, &drop, size);
491         flow->fate |= MLX5_FLOW_FATE_DROP;
492         return size;
493 }
494
495 /**
496  * Convert the @p action into @p flow after ensuring the NIC will understand
497  * and process it correctly.
498  * The conversion is performed action per action, each of them is written into
499  * the @p flow if its size is less than or equal to @p flow_size.
500  * Validation and memory consumption computation are still performed until the
501  * end of @p actions, unless an error is encountered.
502  *
503  * @param[in] dev
504  *   Pointer to Ethernet device structure.
505  * @param[in] actions
506  *   Pointer to flow actions array.
507  * @param[in, out] flow
508  *   Pointer to the rte_flow structure.
509  * @param[in] flow_size
510  *   Size in bytes of the available space in @p flow; if too small, some
511  *   garbage may be present.
512  * @param[out] error
513  *   Pointer to error structure.
514  *
515  * @return
516  *   On success the number of bytes consumed/necessary. If the returned value
517  *   is less than or equal to @p flow_size, the @p actions have been fully
518  *   converted; otherwise another call with this returned memory size should
519  *   be done.
520  *   On error, a negative errno value is returned and rte_errno is set.
521  */
522 static int
523 mlx5_flow_actions(struct rte_eth_dev *dev __rte_unused,
524                   const struct rte_flow_action actions[],
525                   struct rte_flow *flow, const size_t flow_size,
526                   struct rte_flow_error *error)
527 {
528         size_t size = 0;
529         int remain = flow_size;
530         int ret = 0;
531
532         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
533                 switch (actions->type) {
534                 case RTE_FLOW_ACTION_TYPE_VOID:
535                         break;
536                 case RTE_FLOW_ACTION_TYPE_DROP:
537                         ret = mlx5_flow_action_drop(actions, flow, remain,
538                                                     error);
539                         break;
540                 default:
541                         return rte_flow_error_set(error, ENOTSUP,
542                                                   RTE_FLOW_ERROR_TYPE_ACTION,
543                                                   actions,
544                                                   "action not supported");
545                 }
546                 if (ret < 0)
547                         return ret;
548                 if (remain > ret)
549                         remain -= ret;
550                 else
551                         remain = 0;
552                 size += ret;
553         }
554         if (!flow->fate)
555                 return rte_flow_error_set(error, ENOTSUP,
556                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
557                                           NULL,
558                                           "no fate action found");
559         return size;
560 }
561
562 /**
563  * Convert the @p attributes, @p pattern and @p actions into a flow for the
564  * NIC after ensuring the NIC will understand and process it correctly.
565  * The conversion is performed item/action by item/action; each of them is
566  * written into the @p flow if its size is less than or equal to @p
567  * flow_size.
568  * Validation and memory consumption computation are still performed until the
569  * end, unless an error is encountered.
570  *
571  * @param[in] dev
572  *   Pointer to Ethernet device.
573  * @param[in, out] flow
574  *   Pointer to flow structure.
575  * @param[in] flow_size
576  *   Size in bytes of the available space in @p flow; if too small, some
577  *   garbage may be present.
578  * @param[in] attributes
579  *   Flow rule attributes.
580  * @param[in] pattern
581  *   Pattern specification (list terminated by the END pattern item).
582  * @param[in] actions
583  *   Associated actions (list terminated by the END action).
584  * @param[out] error
585  *   Perform verbose error reporting if not NULL.
586  *
587  * @return
588  *   On success the number of bytes consumed/necessary. If the returned value
589  *   is less than or equal to @p flow_size, the flow has been fully converted
590  *   and can be applied; otherwise another call with this returned memory size
591  *   should be done.
592  *   On error, a negative errno value is returned and rte_errno is set.
593  */
594 static int
595 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
596                 const size_t flow_size,
597                 const struct rte_flow_attr *attributes,
598                 const struct rte_flow_item pattern[],
599                 const struct rte_flow_action actions[],
600                 struct rte_flow_error *error)
601 {
602         struct rte_flow local_flow = { .layers = 0, };
603         size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
604         int remain = (flow_size > size) ? flow_size - size : 0;
605         int ret;
606
607         if (!remain)
608                 flow = &local_flow;
609         ret = mlx5_flow_attributes(dev, attributes, flow, error);
610         if (ret < 0)
611                 return ret;
612         ret = mlx5_flow_items(pattern, flow, remain, error);
613         if (ret < 0)
614                 return ret;
615         size += ret;
616         remain = (flow_size > size) ? flow_size - size : 0;
617         ret = mlx5_flow_actions(dev, actions, flow, remain, error);
618         if (ret < 0)
619                 return ret;
620         size += ret;
621         if (size <= flow_size)
622                 flow->verbs.attr->priority = flow->attributes.priority;
623         return size;
624 }
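
/*
 * mlx5_flow_merge() is size-driven and meant to be called twice, as
 * mlx5_flow_list_create() does below. Minimal sketch for a drop rule,
 * error handling omitted:
 *
 *	struct rte_flow_error err;
 *	const struct rte_flow_attr attr = { .ingress = 1 };
 *	const struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	int size = mlx5_flow_merge(dev, NULL, 0, &attr, items, actions, &err);
 *	struct rte_flow *flow = rte_zmalloc(__func__, size, 0);
 *
 *	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
 *	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
 *	(void)mlx5_flow_merge(dev, flow, size, &attr, items, actions, &err);
 */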
625
626 /**
627  * Validate a flow supported by the NIC.
628  *
629  * @see rte_flow_validate()
630  * @see rte_flow_ops
631  */
632 int
633 mlx5_flow_validate(struct rte_eth_dev *dev,
634                    const struct rte_flow_attr *attr,
635                    const struct rte_flow_item items[],
636                    const struct rte_flow_action actions[],
637                    struct rte_flow_error *error)
638 {
639         int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
640
641         if (ret < 0)
642                 return ret;
643         return 0;
644 }
645
646 /**
647  * Remove the flow.
648  *
649  * @param[in] dev
650  *   Pointer to Ethernet device.
651  * @param[in, out] flow
652  *   Pointer to flow structure.
653  */
654 static void
655 mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
656 {
657         if (flow->fate & MLX5_FLOW_FATE_DROP) {
658                 if (flow->verbs.flow) {
659                         claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
660                         flow->verbs.flow = NULL;
661                 }
662         }
663         if (flow->verbs.hrxq) {
664                 mlx5_hrxq_drop_release(dev);
665                 flow->verbs.hrxq = NULL;
666         }
667 }
668
669 /**
670  * Apply the flow.
671  *
672  * @param[in] dev
673  *   Pointer to Ethernet device structure.
674  * @param[in, out] flow
675  *   Pointer to flow structure.
676  * @param[out] error
677  *   Pointer to error structure.
678  *
679  * @return
680  *   0 on success, a negative errno value otherwise and rte_errno is set.
681  */
682 static int
683 mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
684                 struct rte_flow_error *error)
685 {
686         flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
687         if (!flow->verbs.hrxq)
688                 return rte_flow_error_set
689                         (error, errno,
690                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
691                          NULL,
692                          "cannot allocate Drop queue");
693         flow->verbs.flow =
694                 mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
695         if (!flow->verbs.flow) {
696                 mlx5_hrxq_drop_release(dev);
697                 flow->verbs.hrxq = NULL;
698                 return rte_flow_error_set(error, errno,
699                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
700                                           NULL,
701                                           "kernel module refuses to create"
702                                           " flow");
703         }
704         return 0;
705 }
706
707 /**
708  * Create a flow and add it to @p list.
709  *
710  * @param dev
711  *   Pointer to Ethernet device.
712  * @param list
713  *   Pointer to a TAILQ flow list.
714  * @param[in] attr
715  *   Flow rule attributes.
716  * @param[in] items
717  *   Pattern specification (list terminated by the END pattern item).
718  * @param[in] actions
719  *   Associated actions (list terminated by the END action).
720  * @param[out] error
721  *   Perform verbose error reporting if not NULL.
722  *
723  * @return
724  *   A flow on success, NULL otherwise and rte_errno is set.
725  */
726 static struct rte_flow *
727 mlx5_flow_list_create(struct rte_eth_dev *dev,
728                       struct mlx5_flows *list,
729                       const struct rte_flow_attr *attr,
730                       const struct rte_flow_item items[],
731                       const struct rte_flow_action actions[],
732                       struct rte_flow_error *error)
733 {
734         struct rte_flow *flow;
735         size_t size;
736         int ret;
737
738         ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
739         if (ret < 0)
740                 return NULL;
741         size = ret;
742         flow = rte_zmalloc(__func__, size, 0);
743         if (!flow) {
744                 rte_flow_error_set(error, ENOMEM,
745                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
746                                    NULL,
747                                    "cannot allocate memory");
748                 return NULL;
749         }
750         flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
751         flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
752         ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
753         if (ret < 0)
754                 goto error;
755         assert((size_t)ret == size);
756         if (dev->data->dev_started) {
757                 ret = mlx5_flow_apply(dev, flow, error);
758                 if (ret < 0)
759                         goto error;
760         }
761         TAILQ_INSERT_TAIL(list, flow, next);
762         return flow;
763 error:
764         ret = rte_errno; /* Save rte_errno before cleanup. */
765         mlx5_flow_remove(dev, flow);
766         rte_free(flow);
767         rte_errno = ret; /* Restore rte_errno. */
768         return NULL;
769 }
770
771 /**
772  * Create a flow.
773  *
774  * @see rte_flow_create()
775  * @see rte_flow_ops
776  */
777 struct rte_flow *
778 mlx5_flow_create(struct rte_eth_dev *dev,
779                  const struct rte_flow_attr *attr,
780                  const struct rte_flow_item items[],
781                  const struct rte_flow_action actions[],
782                  struct rte_flow_error *error)
783 {
784         return mlx5_flow_list_create
785                 (dev, &((struct priv *)dev->data->dev_private)->flows,
786                  attr, items, actions, error);
787 }
788
789 /**
790  * Destroy a flow in a list.
791  *
792  * @param dev
793  *   Pointer to Ethernet device.
794  * @param list
795  *   Pointer to a TAILQ flow list.
796  * @param[in] flow
797  *   Flow to destroy.
798  */
799 static void
800 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
801                        struct rte_flow *flow)
802 {
803         mlx5_flow_remove(dev, flow);
804         TAILQ_REMOVE(list, flow, next);
805         rte_free(flow);
806 }
807
808 /**
809  * Destroy all flows.
810  *
811  * @param dev
812  *   Pointer to Ethernet device.
813  * @param list
814  *   Pointer to a TAILQ flow list.
815  */
816 void
817 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
818 {
819         while (!TAILQ_EMPTY(list)) {
820                 struct rte_flow *flow;
821
822                 flow = TAILQ_FIRST(list);
823                 mlx5_flow_list_destroy(dev, list, flow);
824         }
825 }
826
827 /**
828  * Remove all flows.
829  *
830  * @param dev
831  *   Pointer to Ethernet device.
832  * @param list
833  *   Pointer to a TAILQ flow list.
834  */
835 void
836 mlx5_flow_stop(struct rte_eth_dev *dev __rte_unused,
837                struct mlx5_flows *list __rte_unused)
838 {
839 }
840
841 /**
842  * Add all flows.
843  *
844  * @param dev
845  *   Pointer to Ethernet device.
846  * @param list
847  *   Pointer to a TAILQ flow list.
848  *
849  * @return
850  *   0 on success, a negative errno value otherwise and rte_errno is set.
851  */
852 int
853 mlx5_flow_start(struct rte_eth_dev *dev __rte_unused,
854                 struct mlx5_flows *list __rte_unused)
855 {
856         return 0;
857 }
858
859 /**
860  * Verify the flow list is empty.
861  *
862  * @param dev
863  *   Pointer to Ethernet device.
864  *
865  * @return the number of flows not released.
866  */
867 int
868 mlx5_flow_verify(struct rte_eth_dev *dev)
869 {
870         struct priv *priv = dev->data->dev_private;
871         struct rte_flow *flow;
872         int ret = 0;
873
874         TAILQ_FOREACH(flow, &priv->flows, next) {
875                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
876                         dev->data->port_id, (void *)flow);
877                 ++ret;
878         }
879         return ret;
880 }
881
882 /**
883  * Enable a control flow configured from the control plane.
884  *
885  * @param dev
886  *   Pointer to Ethernet device.
887  * @param eth_spec
888  *   An Ethernet flow spec to apply.
889  * @param eth_mask
890  *   An Ethernet flow mask to apply.
891  * @param vlan_spec
892  *   A VLAN flow spec to apply.
893  * @param vlan_mask
894  *   A VLAN flow mask to apply.
895  *
896  * @return
897  *   0 on success, a negative errno value otherwise and rte_errno is set.
898  */
899 int
900 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
901                     struct rte_flow_item_eth *eth_spec,
902                     struct rte_flow_item_eth *eth_mask,
903                     struct rte_flow_item_vlan *vlan_spec,
904                     struct rte_flow_item_vlan *vlan_mask)
905 {
906         struct priv *priv = dev->data->dev_private;
907         const struct rte_flow_attr attr = {
908                 .ingress = 1,
909                 .priority = priv->config.flow_prio - 1,
910         };
911         struct rte_flow_item items[] = {
912                 {
913                         .type = RTE_FLOW_ITEM_TYPE_ETH,
914                         .spec = eth_spec,
915                         .last = NULL,
916                         .mask = eth_mask,
917                 },
918                 {
919                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
920                                 RTE_FLOW_ITEM_TYPE_END,
921                         .spec = vlan_spec,
922                         .last = NULL,
923                         .mask = vlan_mask,
924                 },
925                 {
926                         .type = RTE_FLOW_ITEM_TYPE_END,
927                 },
928         };
929         uint16_t queue[priv->reta_idx_n];
930         struct rte_flow_action_rss action_rss = {
931                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
932                 .level = 0,
933                 .types = priv->rss_conf.rss_hf,
934                 .key_len = priv->rss_conf.rss_key_len,
935                 .queue_num = priv->reta_idx_n,
936                 .key = priv->rss_conf.rss_key,
937                 .queue = queue,
938         };
939         struct rte_flow_action actions[] = {
940                 {
941                         .type = RTE_FLOW_ACTION_TYPE_RSS,
942                         .conf = &action_rss,
943                 },
944                 {
945                         .type = RTE_FLOW_ACTION_TYPE_END,
946                 },
947         };
948         struct rte_flow *flow;
949         struct rte_flow_error error;
950         unsigned int i;
951
952         if (!priv->reta_idx_n) {
953                 rte_errno = EINVAL;
954                 return -rte_errno;
955         }
956         for (i = 0; i != priv->reta_idx_n; ++i)
957                 queue[i] = (*priv->reta_idx)[i];
958         flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
959                                      actions, &error);
960         if (!flow)
961                 return -rte_errno;
962         return 0;
963 }
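
/*
 * Typical use from the control path: pass an Ethernet spec whose mask
 * equals the spec to match one address exactly. Sketch for a broadcast
 * control flow through the mlx5_ctrl_flow() wrapper below:
 *
 *	struct rte_flow_item_eth bcast = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *
 *	mlx5_ctrl_flow(dev, &bcast, &bcast);
 */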
964
965 /**
966  * Enable a control flow configured from the control plane.
967  *
968  * @param dev
969  *   Pointer to Ethernet device.
970  * @param eth_spec
971  *   An Ethernet flow spec to apply.
972  * @param eth_mask
973  *   An Ethernet flow mask to apply.
974  *
975  * @return
976  *   0 on success, a negative errno value otherwise and rte_errno is set.
977  */
978 int
979 mlx5_ctrl_flow(struct rte_eth_dev *dev,
980                struct rte_flow_item_eth *eth_spec,
981                struct rte_flow_item_eth *eth_mask)
982 {
983         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
984 }
985
986 /**
987  * Destroy a flow.
988  *
989  * @see rte_flow_destroy()
990  * @see rte_flow_ops
991  */
992 int
993 mlx5_flow_destroy(struct rte_eth_dev *dev,
994                   struct rte_flow *flow,
995                   struct rte_flow_error *error __rte_unused)
996 {
997         struct priv *priv = dev->data->dev_private;
998
999         mlx5_flow_list_destroy(dev, &priv->flows, flow);
1000         return 0;
1001 }
1002
1003 /**
1004  * Destroy all flows.
1005  *
1006  * @see rte_flow_flush()
1007  * @see rte_flow_ops
1008  */
1009 int
1010 mlx5_flow_flush(struct rte_eth_dev *dev,
1011                 struct rte_flow_error *error __rte_unused)
1012 {
1013         struct priv *priv = dev->data->dev_private;
1014
1015         mlx5_flow_list_flush(dev, &priv->flows);
1016         return 0;
1017 }
1018
1019 /**
1020  * Isolated mode.
1021  *
1022  * @see rte_flow_isolate()
1023  * @see rte_flow_ops
1024  */
1025 int
1026 mlx5_flow_isolate(struct rte_eth_dev *dev,
1027                   int enable,
1028                   struct rte_flow_error *error)
1029 {
1030         struct priv *priv = dev->data->dev_private;
1031
1032         if (dev->data->dev_started) {
1033                 rte_flow_error_set(error, EBUSY,
1034                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1035                                    NULL,
1036                                    "port must be stopped first");
1037                 return -rte_errno;
1038         }
1039         priv->isolated = !!enable;
1040         if (enable)
1041                 dev->dev_ops = &mlx5_dev_ops_isolate;
1042         else
1043                 dev->dev_ops = &mlx5_dev_ops;
1044         return 0;
1045 }
1046
1047 /**
1048  * Convert a flow director filter to a generic flow.
1049  *
1050  * @param dev
1051  *   Pointer to Ethernet device.
1052  * @param fdir_filter
1053  *   Flow director filter to add.
1054  * @param attributes
1055  *   Generic flow parameters structure.
1056  *
1057  * @return
1058  *   0 on success, a negative errno value otherwise and rte_errno is set.
1059  */
1060 static int
1061 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
1062                          const struct rte_eth_fdir_filter *fdir_filter,
1063                          struct mlx5_fdir *attributes)
1064 {
1065         struct priv *priv = dev->data->dev_private;
1066         const struct rte_eth_fdir_input *input = &fdir_filter->input;
1067         const struct rte_eth_fdir_masks *mask =
1068                 &dev->data->dev_conf.fdir_conf.mask;
1069
1070         /* Validate queue number. */
1071         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
1072                 DRV_LOG(ERR, "port %u invalid queue number %d",
1073                         dev->data->port_id, fdir_filter->action.rx_queue);
1074                 rte_errno = EINVAL;
1075                 return -rte_errno;
1076         }
1077         attributes->attr.ingress = 1;
1078         attributes->items[0] = (struct rte_flow_item) {
1079                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1080                 .spec = &attributes->l2,
1081                 .mask = &attributes->l2_mask,
1082         };
1083         switch (fdir_filter->action.behavior) {
1084         case RTE_ETH_FDIR_ACCEPT:
1085                 attributes->actions[0] = (struct rte_flow_action){
1086                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
1087                         .conf = &attributes->queue,
1088                 };
1089                 break;
1090         case RTE_ETH_FDIR_REJECT:
1091                 attributes->actions[0] = (struct rte_flow_action){
1092                         .type = RTE_FLOW_ACTION_TYPE_DROP,
1093                 };
1094                 break;
1095         default:
1096                 DRV_LOG(ERR, "port %u invalid behavior %d",
1097                         dev->data->port_id,
1098                         fdir_filter->action.behavior);
1099                 rte_errno = ENOTSUP;
1100                 return -rte_errno;
1101         }
1102         attributes->queue.index = fdir_filter->action.rx_queue;
1103         /* Handle L3. */
1104         switch (fdir_filter->input.flow_type) {
1105         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1106         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1107         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1108                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
1109                         .src_addr = input->flow.ip4_flow.src_ip,
1110                         .dst_addr = input->flow.ip4_flow.dst_ip,
1111                         .time_to_live = input->flow.ip4_flow.ttl,
1112                         .type_of_service = input->flow.ip4_flow.tos,
1113                         .next_proto_id = input->flow.ip4_flow.proto,
1114                 };
1115                 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
1116                         .src_addr = mask->ipv4_mask.src_ip,
1117                         .dst_addr = mask->ipv4_mask.dst_ip,
1118                         .time_to_live = mask->ipv4_mask.ttl,
1119                         .type_of_service = mask->ipv4_mask.tos,
1120                         .next_proto_id = mask->ipv4_mask.proto,
1121                 };
1122                 attributes->items[1] = (struct rte_flow_item){
1123                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
1124                         .spec = &attributes->l3,
1125                         .mask = &attributes->l3_mask,
1126                 };
1127                 break;
1128         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1129         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1130         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1131                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
1132                         .hop_limits = input->flow.ipv6_flow.hop_limits,
1133                         .proto = input->flow.ipv6_flow.proto,
1134                 };
1135
1136                 memcpy(attributes->l3.ipv6.hdr.src_addr,
1137                        input->flow.ipv6_flow.src_ip,
1138                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
1139                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
1140                        input->flow.ipv6_flow.dst_ip,
1141                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
1142                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
1143                        mask->ipv6_mask.src_ip,
1144                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
1145                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
1146                        mask->ipv6_mask.dst_ip,
1147                        RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
1148                 attributes->items[1] = (struct rte_flow_item){
1149                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
1150                         .spec = &attributes->l3,
1151                         .mask = &attributes->l3_mask,
1152                 };
1153                 break;
1154         default:
1155                 DRV_LOG(ERR, "port %u invalid flow type %d",
1156                         dev->data->port_id, fdir_filter->input.flow_type);
1157                 rte_errno = ENOTSUP;
1158                 return -rte_errno;
1159         }
1160         /* Handle L4. */
1161         switch (fdir_filter->input.flow_type) {
1162         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1163                 attributes->l4.udp.hdr = (struct udp_hdr){
1164                         .src_port = input->flow.udp4_flow.src_port,
1165                         .dst_port = input->flow.udp4_flow.dst_port,
1166                 };
1167                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1168                         .src_port = mask->src_port_mask,
1169                         .dst_port = mask->dst_port_mask,
1170                 };
1171                 attributes->items[2] = (struct rte_flow_item){
1172                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1173                         .spec = &attributes->l4,
1174                         .mask = &attributes->l4_mask,
1175                 };
1176                 break;
1177         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1178                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1179                         .src_port = input->flow.tcp4_flow.src_port,
1180                         .dst_port = input->flow.tcp4_flow.dst_port,
1181                 };
1182                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1183                         .src_port = mask->src_port_mask,
1184                         .dst_port = mask->dst_port_mask,
1185                 };
1186                 attributes->items[2] = (struct rte_flow_item){
1187                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1188                         .spec = &attributes->l4,
1189                         .mask = &attributes->l4_mask,
1190                 };
1191                 break;
1192         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1193                 attributes->l4.udp.hdr = (struct udp_hdr){
1194                         .src_port = input->flow.udp6_flow.src_port,
1195                         .dst_port = input->flow.udp6_flow.dst_port,
1196                 };
1197                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1198                         .src_port = mask->src_port_mask,
1199                         .dst_port = mask->dst_port_mask,
1200                 };
1201                 attributes->items[2] = (struct rte_flow_item){
1202                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1203                         .spec = &attributes->l4,
1204                         .mask = &attributes->l4_mask,
1205                 };
1206                 break;
1207         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1208                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1209                         .src_port = input->flow.tcp6_flow.src_port,
1210                         .dst_port = input->flow.tcp6_flow.dst_port,
1211                 };
1212                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1213                         .src_port = mask->src_port_mask,
1214                         .dst_port = mask->dst_port_mask,
1215                 };
1216                 attributes->items[2] = (struct rte_flow_item){
1217                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1218                         .spec = &attributes->l4,
1219                         .mask = &attributes->l4_mask,
1220                 };
1221                 break;
1222         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1223         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1224                 break;
1225         default:
1226                 DRV_LOG(ERR, "port %u invalid flow type %d",
1227                         dev->data->port_id, fdir_filter->input.flow_type);
1228                 rte_errno = ENOTSUP;
1229                 return -rte_errno;
1230         }
1231         return 0;
1232 }
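
/*
 * For example, a perfect-mode filter on IPv4/UDP comes out of this
 * conversion as the item list ETH / IPV4 / UDP / END in
 * attributes->items[], spec and mask unions filled from the FDIR input and
 * the global FDIR masks, and a QUEUE (accept) or DROP (reject) action in
 * attributes->actions[0].
 */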
1233
1234 /**
1235  * Add new flow director filter and store it in list.
1236  *
1237  * @param dev
1238  *   Pointer to Ethernet device.
1239  * @param fdir_filter
1240  *   Flow director filter to add.
1241  *
1242  * @return
1243  *   0 on success, a negative errno value otherwise and rte_errno is set.
1244  */
1245 static int
1246 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
1247                      const struct rte_eth_fdir_filter *fdir_filter)
1248 {
1249         struct priv *priv = dev->data->dev_private;
1250         struct mlx5_fdir attributes = {
1251                 .attr.group = 0,
1252                 .l2_mask = {
1253                         .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1254                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1255                         .type = 0,
1256                 },
1257         };
1258         struct rte_flow_error error;
1259         struct rte_flow *flow;
1260         int ret;
1261
1262         ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
1263         if (ret)
1264                 return ret;
1265         flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
1266                                      attributes.items, attributes.actions,
1267                                      &error);
1268         if (flow) {
1269                 DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
1270                         (void *)flow);
1271                 return 0;
1272         }
1273         return -rte_errno;
1274 }
1275
1276 /**
1277  * Delete specific filter.
1278  *
1279  * @param dev
1280  *   Pointer to Ethernet device.
1281  * @param fdir_filter
1282  *   Filter to be deleted.
1283  *
1284  * @return
1285  *   0 on success, a negative errno value otherwise and rte_errno is set.
1286  */
1287 static int
1288 mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
1289                         const struct rte_eth_fdir_filter *fdir_filter
1290                         __rte_unused)
1291 {
1292         rte_errno = ENOTSUP;
1293         return -rte_errno;
1294 }
1295
1296 /**
1297  * Update queue for specific filter.
1298  *
1299  * @param dev
1300  *   Pointer to Ethernet device.
1301  * @param fdir_filter
1302  *   Filter to be updated.
1303  *
1304  * @return
1305  *   0 on success, a negative errno value otherwise and rte_errno is set.
1306  */
1307 static int
1308 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
1309                         const struct rte_eth_fdir_filter *fdir_filter)
1310 {
1311         int ret;
1312
1313         ret = mlx5_fdir_filter_delete(dev, fdir_filter);
1314         if (ret)
1315                 return ret;
1316         return mlx5_fdir_filter_add(dev, fdir_filter);
1317 }
1318
1319 /**
1320  * Flush all filters.
1321  *
1322  * @param dev
1323  *   Pointer to Ethernet device.
1324  */
1325 static void
1326 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
1327 {
1328         struct priv *priv = dev->data->dev_private;
1329
1330         mlx5_flow_list_flush(dev, &priv->flows);
1331 }
1332
1333 /**
1334  * Get flow director information.
1335  *
1336  * @param dev
1337  *   Pointer to Ethernet device.
1338  * @param[out] fdir_info
1339  *   Resulting flow director information.
1340  */
1341 static void
1342 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
1343 {
1344         struct rte_eth_fdir_masks *mask =
1345                 &dev->data->dev_conf.fdir_conf.mask;
1346
1347         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
1348         fdir_info->guarant_spc = 0;
1349         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
1350         fdir_info->max_flexpayload = 0;
1351         fdir_info->flow_types_mask[0] = 0;
1352         fdir_info->flex_payload_unit = 0;
1353         fdir_info->max_flex_payload_segment_num = 0;
1354         fdir_info->flex_payload_limit = 0;
1355         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
1356 }
1357
1358 /**
1359  * Deal with flow director operations.
1360  *
1361  * @param dev
1362  *   Pointer to Ethernet device.
1363  * @param filter_op
1364  *   Operation to perform.
1365  * @param arg
1366  *   Pointer to operation-specific structure.
1367  *
1368  * @return
1369  *   0 on success, a negative errno value otherwise and rte_errno is set.
1370  */
1371 static int
1372 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1373                     void *arg)
1374 {
1375         enum rte_fdir_mode fdir_mode =
1376                 dev->data->dev_conf.fdir_conf.mode;
1377
1378         if (filter_op == RTE_ETH_FILTER_NOP)
1379                 return 0;
1380         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
1381             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1382                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
1383                         dev->data->port_id, fdir_mode);
1384                 rte_errno = EINVAL;
1385                 return -rte_errno;
1386         }
1387         switch (filter_op) {
1388         case RTE_ETH_FILTER_ADD:
1389                 return mlx5_fdir_filter_add(dev, arg);
1390         case RTE_ETH_FILTER_UPDATE:
1391                 return mlx5_fdir_filter_update(dev, arg);
1392         case RTE_ETH_FILTER_DELETE:
1393                 return mlx5_fdir_filter_delete(dev, arg);
1394         case RTE_ETH_FILTER_FLUSH:
1395                 mlx5_fdir_filter_flush(dev);
1396                 break;
1397         case RTE_ETH_FILTER_INFO:
1398                 mlx5_fdir_info_get(dev, arg);
1399                 break;
1400         default:
1401                 DRV_LOG(DEBUG, "port %u unknown operation %u",
1402                         dev->data->port_id, filter_op);
1403                 rte_errno = EINVAL;
1404                 return -rte_errno;
1405         }
1406         return 0;
1407 }
1408
1409 /**
1410  * Manage filter operations.
1411  *
1412  * @param dev
1413  *   Pointer to Ethernet device structure.
1414  * @param filter_type
1415  *   Filter type.
1416  * @param filter_op
1417  *   Operation to perform.
1418  * @param arg
1419  *   Pointer to operation-specific structure.
1420  *
1421  * @return
1422  *   0 on success, a negative errno value otherwise and rte_errno is set.
1423  */
1424 int
1425 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1426                      enum rte_filter_type filter_type,
1427                      enum rte_filter_op filter_op,
1428                      void *arg)
1429 {
1430         switch (filter_type) {
1431         case RTE_ETH_FILTER_GENERIC:
1432                 if (filter_op != RTE_ETH_FILTER_GET) {
1433                         rte_errno = EINVAL;
1434                         return -rte_errno;
1435                 }
1436                 *(const void **)arg = &mlx5_flow_ops;
1437                 return 0;
1438         case RTE_ETH_FILTER_FDIR:
1439                 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
1440         default:
1441                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
1442                         dev->data->port_id, filter_type);
1443                 rte_errno = ENOTSUP;
1444                 return -rte_errno;
1445         }
1446         return 0;
1447 }
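
/*
 * The RTE_ETH_FILTER_GENERIC case above is how the rte_flow library
 * obtains this PMD's flow ops. Equivalent direct query, as a sketch:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	if (mlx5_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *				 RTE_ETH_FILTER_GET, &ops) == 0)
 *		assert(ops == &mlx5_flow_ops);
 */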