net/mlx5: add flow VLAN item
[dpdk.git] drivers/net/mlx5/mlx5_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <sys/queue.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_prm.h"
32 #include "mlx5_glue.h"
33
34 /* Dev ops structure defined in mlx5.c */
35 extern const struct eth_dev_ops mlx5_dev_ops;
36 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
37
38 /* Pattern Layer bits. */
39 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
40 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
41 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
42 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
43 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
44 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
45 /* Masks. */
46 #define MLX5_FLOW_LAYER_OUTER_L3 \
47         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
48 #define MLX5_FLOW_LAYER_OUTER_L4 \
49         (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
50
51 /* Actions that modify the fate of matching traffic. */
52 #define MLX5_FLOW_FATE_DROP (1u << 0)
53 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
54
55 /** Verbs resources attached to a single flow rule. */
56 struct mlx5_flow_verbs {
57         unsigned int size; /**< Total size of the specifications. */
58         struct {
59                 struct ibv_flow_attr *attr;
60                 /**< Pointer to the Verbs flow attributes. */
61                 uint8_t *specs; /**< Pointer to the specifications. */
62         };
63         struct ibv_flow *flow; /**< Verbs flow pointer. */
64         struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
65 };
66
67 /* Flow structure. */
68 struct rte_flow {
69         TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
70         struct rte_flow_attr attributes; /**< User flow attribute. */
71         uint32_t layers;
72         /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
73         uint32_t fate;
74         /**< Bit-fields of present fate actions, see MLX5_FLOW_FATE_*. */
75         struct mlx5_flow_verbs verbs; /* Verbs flow. */
76         uint16_t queue; /**< Destination queue to redirect traffic to. */
77 };
78
79 static const struct rte_flow_ops mlx5_flow_ops = {
80         .validate = mlx5_flow_validate,
81         .create = mlx5_flow_create,
82         .destroy = mlx5_flow_destroy,
83         .flush = mlx5_flow_flush,
84         .isolate = mlx5_flow_isolate,
85 };
86
87 /* Convert FDIR request to Generic flow. */
88 struct mlx5_fdir {
89         struct rte_flow_attr attr;
90         struct rte_flow_action actions[2];
91         struct rte_flow_item items[4];
92         struct rte_flow_item_eth l2;
93         struct rte_flow_item_eth l2_mask;
94         union {
95                 struct rte_flow_item_ipv4 ipv4;
96                 struct rte_flow_item_ipv6 ipv6;
97         } l3;
98         union {
99                 struct rte_flow_item_ipv4 ipv4;
100                 struct rte_flow_item_ipv6 ipv6;
101         } l3_mask;
102         union {
103                 struct rte_flow_item_udp udp;
104                 struct rte_flow_item_tcp tcp;
105         } l4;
106         union {
107                 struct rte_flow_item_udp udp;
108                 struct rte_flow_item_tcp tcp;
109         } l4_mask;
110         struct rte_flow_action_queue queue;
111 };
112
113 /* Verbs specification header. */
114 struct ibv_spec_header {
115         enum ibv_flow_spec_type type;
116         uint16_t size;
117 };
118
119 /**
120  * Discover the maximum number of priorities available.
121  *
122  * @param[in] dev
123  *   Pointer to Ethernet device.
124  *
125  * @return
126  *   Number of supported flow priorities on success, a negative errno value
127  *   otherwise and rte_errno is set.
128  */
129 int
130 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
131 {
132         struct {
133                 struct ibv_flow_attr attr;
134                 struct ibv_flow_spec_eth eth;
135                 struct ibv_flow_spec_action_drop drop;
136         } flow_attr = {
137                 .attr = {
138                         .num_of_specs = 2,
139                 },
140                 .eth = {
141                         .type = IBV_FLOW_SPEC_ETH,
142                         .size = sizeof(struct ibv_flow_spec_eth),
143                 },
144                 .drop = {
145                         .size = sizeof(struct ibv_flow_spec_action_drop),
146                         .type = IBV_FLOW_SPEC_ACTION_DROP,
147                 },
148         };
149         struct ibv_flow *flow;
150         struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
151         uint16_t vprio[] = { 8, 16 };
152         int i;
153
154         if (!drop) {
155                 rte_errno = ENOTSUP;
156                 return -rte_errno;
157         }
158         for (i = 0; i != RTE_DIM(vprio); i++) {
159                 flow_attr.attr.priority = vprio[i] - 1;
160                 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
161                 if (!flow)
162                         break;
163                 claim_zero(mlx5_glue->destroy_flow(flow));
164         }
165         mlx5_hrxq_drop_release(dev);
166         DRV_LOG(INFO, "port %u flow maximum priority: %d",
167                 dev->data->port_id, vprio[i - 1]);
168         return vprio[i - 1];
169 }
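
/*
 * Illustrative sketch (not part of the original file): how a probe-time
 * caller might consume the discovered priority count. The
 * priv->config.flow_prio field is the one read back by
 * mlx5_flow_attributes() below.
 *
 *	int prio = mlx5_flow_discover_priorities(dev);
 *
 *	if (prio < 0)
 *		return prio;
 *	priv->config.flow_prio = prio;
 *
 * With the vprio[] table above the result is either 8 or 16, depending on
 * the first priority level the kernel refuses to create.
 */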
170
171 /**
172  * Verify the @p attributes will be correctly understood by the NIC and store
173  * them in the @p flow if everything is correct.
174  *
175  * @param[in] dev
176  *   Pointer to Ethernet device.
177  * @param[in] attributes
178  *   Pointer to flow attributes
179  * @param[in, out] flow
180  *   Pointer to the rte_flow structure.
181  * @param[out] error
182  *   Pointer to error structure.
183  *
184  * @return
185  *   0 on success, a negative errno value otherwise and rte_errno is set.
186  */
187 static int
188 mlx5_flow_attributes(struct rte_eth_dev *dev,
189                      const struct rte_flow_attr *attributes,
190                      struct rte_flow *flow,
191                      struct rte_flow_error *error)
192 {
193         uint32_t priority_max =
194                 ((struct priv *)dev->data->dev_private)->config.flow_prio;
195
196         if (attributes->group)
197                 return rte_flow_error_set(error, ENOTSUP,
198                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
199                                           NULL,
200                                           "groups are not supported");
201         if (attributes->priority >= priority_max)
202                 return rte_flow_error_set(error, ENOTSUP,
203                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
204                                           NULL,
205                                           "priority out of range");
206         if (attributes->egress)
207                 return rte_flow_error_set(error, ENOTSUP,
208                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
209                                           NULL,
210                                           "egress is not supported");
211         if (attributes->transfer)
212                 return rte_flow_error_set(error, ENOTSUP,
213                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
214                                           NULL,
215                                           "transfer is not supported");
216         if (!attributes->ingress)
217                 return rte_flow_error_set(error, ENOTSUP,
218                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
219                                           NULL,
220                                           "ingress attribute is mandatory");
221         flow->attributes = *attributes;
222         return 0;
223 }
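
/*
 * Example (sketch): the only attribute combination accepted by the checks
 * above is an ingress-only rule in group 0 with a priority strictly below
 * the discovered maximum, e.g.:
 *
 *	const struct rte_flow_attr attr = {
 *		.group = 0,
 *		.priority = 0,
 *		.ingress = 1,
 *	};
 */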
224
225 /**
226  * Verify the @p item specifications (spec, last, mask) are compatible with the
227  * NIC capabilities.
228  *
229  * @param[in] item
230  *   Item specification.
231  * @param[in] mask
232  *   @p item->mask or flow default bit-masks.
233  * @param[in] nic_mask
234  *   Bit-masks covering supported fields by the NIC to compare with user mask.
235  * @param[in] size
236  *   Bit-masks size in bytes.
237  * @param[out] error
238  *   Pointer to error structure.
239  *
240  * @return
241  *   0 on success, a negative errno value otherwise and rte_errno is set.
242  */
243 static int
244 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
245                           const uint8_t *mask,
246                           const uint8_t *nic_mask,
247                           unsigned int size,
248                           struct rte_flow_error *error)
249 {
250         unsigned int i;
251
252         assert(nic_mask);
253         for (i = 0; i < size; ++i)
254                 if ((nic_mask[i] | mask[i]) != nic_mask[i])
255                         return rte_flow_error_set(error, ENOTSUP,
256                                                   RTE_FLOW_ERROR_TYPE_ITEM,
257                                                   item,
258                                                   "mask enables non supported"
259                                                   " bits");
260         if (!item->spec && (item->mask || item->last))
261                 return rte_flow_error_set(error, EINVAL,
262                                           RTE_FLOW_ERROR_TYPE_ITEM,
263                                           item,
264                                           "mask/last without a spec is not"
265                                           " supported");
266         if (item->spec && item->last) {
267                 uint8_t spec[size];
268                 uint8_t last[size];
269                 unsigned int i;
270                 int ret;
271
272                 for (i = 0; i < size; ++i) {
273                         spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
274                         last[i] = ((const uint8_t *)item->last)[i] & mask[i];
275                 }
276                 ret = memcmp(spec, last, size);
277                 if (ret != 0)
278                         return rte_flow_error_set(error, ENOTSUP,
279                                                   RTE_FLOW_ERROR_TYPE_ITEM,
280                                                   item,
281                                                   "range is not supported");
282         }
283         return 0;
284 }
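
/*
 * Example (sketch): the per-byte test above requires every bit set in the
 * user mask to also be set in the NIC mask. With the VLAN nic_mask defined
 * further below (.tci = RTE_BE16(0x0fff)), a user mask also covering the
 * PCP/DEI bits is refused with ENOTSUP:
 *
 *	const struct rte_flow_item_vlan bad_mask = {
 *		.tci = RTE_BE16(0xffff),
 *	};
 */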
285
286 /**
287  * Add a verbs specification into @p flow.
288  *
289  * @param[in, out] flow
290  *   Pointer to flow structure.
291  * @param[in] src
292  *   Create specification.
293  * @param[in] size
294  *   Size in bytes of the specification to copy.
295  */
296 static void
297 mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
298 {
299         if (flow->verbs.specs) {
300                 void *dst;
301
302                 dst = (void *)(flow->verbs.specs + flow->verbs.size);
303                 memcpy(dst, src, size);
304                 ++flow->verbs.attr->num_of_specs;
305         }
306         flow->verbs.size += size;
307 }
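
/*
 * Note: this helper implements the two-pass sizing scheme used throughout
 * this file. When flow->verbs.specs is NULL (measurement pass) only
 * flow->verbs.size is accumulated; when it points into a real buffer
 * (fill pass) the specification is also copied and num_of_specs is
 * incremented. Sketch of the fill-pass memory layout set up by
 * mlx5_flow_list_create():
 *
 *	flow->verbs.attr  --> struct ibv_flow_attr
 *	flow->verbs.specs --> spec #0 | spec #1 | ... (packed back to back)
 */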
308
309 /**
310  * Convert the @p item into a Verbs specification after ensuring the NIC
311  * will understand and process it correctly.
312  * If the necessary size for the conversion is greater than @p flow_size,
313  * nothing is written in @p flow; the validation is still performed.
314  *
315  * @param[in] item
316  *   Item specification.
317  * @param[in, out] flow
318  *   Pointer to flow structure.
319  * @param[in] flow_size
320  *   Size in bytes of the available space in @p flow, if too small, nothing is
321  *   written.
322  * @param[out] error
323  *   Pointer to error structure.
324  *
325  * @return
326  *   On success the number of bytes consumed/necessary, if the returned value
327  *   is lesser or equal to @p flow_size, the @p item has fully been converted,
328  *   otherwise another call with this returned memory size should be done.
329  *   On error, a negative errno value is returned and rte_errno is set.
330  */
331 static int
332 mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
333                    const size_t flow_size, struct rte_flow_error *error)
334 {
335         const struct rte_flow_item_eth *spec = item->spec;
336         const struct rte_flow_item_eth *mask = item->mask;
337         const struct rte_flow_item_eth nic_mask = {
338                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
339                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
340                 .type = RTE_BE16(0xffff),
341         };
342         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
343         struct ibv_flow_spec_eth eth = {
344                 .type = IBV_FLOW_SPEC_ETH,
345                 .size = size,
346         };
347         int ret;
348
349         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
350                 return rte_flow_error_set(error, ENOTSUP,
351                                           RTE_FLOW_ERROR_TYPE_ITEM,
352                                           item,
353                                           "L2 layers already configured");
354         if (!mask)
355                 mask = &rte_flow_item_eth_mask;
356         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
357                                         (const uint8_t *)&nic_mask,
358                                         sizeof(struct rte_flow_item_eth),
359                                         error);
360         if (ret)
361                 return ret;
362         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
363         if (size > flow_size)
364                 return size;
365         if (spec) {
366                 unsigned int i;
367
368                 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
369                 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
370                 eth.val.ether_type = spec->type;
371                 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
372                 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
373                 eth.mask.ether_type = mask->type;
374                 /* Remove unwanted bits from values. */
375                 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
376                         eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
377                         eth.val.src_mac[i] &= eth.mask.src_mac[i];
378                 }
379                 eth.val.ether_type &= eth.mask.ether_type;
380         }
381         mlx5_flow_spec_verbs_add(flow, &eth, size);
382         return size;
383 }
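
/*
 * Example (sketch): an Ethernet item accepted by the converter above,
 * matching a single destination MAC address and leaving the source
 * address and EtherType wildcarded:
 *
 *	const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *	};
 *	const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 */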
384
385 /**
386  * Update the VLAN tag in the Verbs Ethernet specification.
387  *
388  * @param[in, out] attr
389  *   Pointer to Verbs attributes structure.
390  * @param[in] eth
391  *   Verbs structure containing the VLAN information to copy.
392  */
393 static void
394 mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
395                            struct ibv_flow_spec_eth *eth)
396 {
397         unsigned int i;
398         enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
399         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
400                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
401
402         for (i = 0; i != attr->num_of_specs; ++i) {
403                 if (hdr->type == search) {
404                         struct ibv_flow_spec_eth *e =
405                                 (struct ibv_flow_spec_eth *)hdr;
406
407                         e->val.vlan_tag = eth->val.vlan_tag;
408                         e->mask.vlan_tag = eth->mask.vlan_tag;
409                         e->val.ether_type = eth->val.ether_type;
410                         e->mask.ether_type = eth->mask.ether_type;
411                         break;
412                 }
413                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
414         }
415 }
416
417 /**
418  * Convert the @p item into a Verbs specification in @p flow (or update the
419  * Ethernet Verbs specification already present) after ensuring the NIC will
420  * understand and process it correctly.
421  * If the necessary size for the conversion is greater than @p flow_size,
422  * nothing is written in @p flow; the validation is still performed.
423  *
424  * @param[in] item
425  *   Item specification.
426  * @param[in, out] flow
427  *   Pointer to flow structure.
428  * @param[in] flow_size
429  *   Size in bytes of the available space in @p flow, if too small, nothing is
430  *   written.
431  * @param[out] error
432  *   Pointer to error structure.
433  *
434  * @return
435  *   On success the number of bytes consumed/necessary, if the returned value
436  *   is lesser or equal to @p flow_size, the @p item has fully been converted,
437  *   otherwise another call with this returned memory size should be done.
438  *   On error, a negative errno value is returned and rte_errno is set.
439  */
440 static int
441 mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
442                     const size_t flow_size, struct rte_flow_error *error)
443 {
444         const struct rte_flow_item_vlan *spec = item->spec;
445         const struct rte_flow_item_vlan *mask = item->mask;
446         const struct rte_flow_item_vlan nic_mask = {
447                 .tci = RTE_BE16(0x0fff),
448                 .inner_type = RTE_BE16(0xffff),
449         };
450         unsigned int size = sizeof(struct ibv_flow_spec_eth);
451         struct ibv_flow_spec_eth eth = {
452                 .type = IBV_FLOW_SPEC_ETH,
453                 .size = size,
454         };
455         int ret;
456         const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
457                         MLX5_FLOW_LAYER_OUTER_L4;
458         const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
459         const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
460
461         if (flow->layers & vlanm)
462                 return rte_flow_error_set(error, ENOTSUP,
463                                           RTE_FLOW_ERROR_TYPE_ITEM,
464                                           item,
465                                           "VLAN layer already configured");
466         else if ((flow->layers & l34m) != 0)
467                 return rte_flow_error_set(error, ENOTSUP,
468                                           RTE_FLOW_ERROR_TYPE_ITEM,
469                                           item,
470                                           "L2 layer cannot follow L3/L4 layer");
471         if (!mask)
472                 mask = &rte_flow_item_vlan_mask;
473         ret = mlx5_flow_item_acceptable
474                 (item, (const uint8_t *)mask,
475                  (const uint8_t *)&nic_mask,
476                  sizeof(struct rte_flow_item_vlan), error);
477         if (ret)
478                 return ret;
479         if (spec) {
480                 eth.val.vlan_tag = spec->tci;
481                 eth.mask.vlan_tag = mask->tci;
482                 eth.val.vlan_tag &= eth.mask.vlan_tag;
483                 eth.val.ether_type = spec->inner_type;
484                 eth.mask.ether_type = mask->inner_type;
485                 eth.val.ether_type &= eth.mask.ether_type;
486         }
487         /*
488          * From verbs perspective an empty VLAN is equivalent
489          * to a packet without VLAN layer.
490          */
491         if (!eth.mask.vlan_tag)
492                 return rte_flow_error_set(error, EINVAL,
493                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
494                                           item->spec,
495                                           "VLAN cannot be empty");
496         if (!(flow->layers & l2m)) {
497                 if (size <= flow_size)
498                         mlx5_flow_spec_verbs_add(flow, &eth, size);
499         } else {
500                 if (flow->verbs.attr)
501                         mlx5_flow_item_vlan_update(flow->verbs.attr, &eth);
502                 size = 0; /* Only an update is done in eth specification. */
503         }
504         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 |
505                 MLX5_FLOW_LAYER_OUTER_VLAN;
506         return size;
507 }
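
/*
 * Example (sketch): matching VLAN ID 100 on top of any Ethernet frame.
 * Since Verbs carries the VLAN TCI inside ibv_flow_spec_eth, the VLAN item
 * below is folded into the Ethernet specification, either added here or
 * patched in by mlx5_flow_item_vlan_update() when an ETH item was already
 * converted:
 *
 *	const struct rte_flow_item_vlan vlan_spec = {
 *		.tci = RTE_BE16(100),
 *	};
 *	const struct rte_flow_item_vlan vlan_mask = {
 *		.tci = RTE_BE16(0x0fff),
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */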
508
509 /**
510  * Convert the @p pattern into Verbs specifications after ensuring the NIC
511  * will understand and process it correctly.
512  * The conversion is performed item per item, each of them is written into
513  * the @p flow if its size is lesser or equal to @p flow_size.
514  * Validation and memory consumption computation are still performed until the
515  * end of @p pattern, unless an error is encountered.
516  *
517  * @param[in] pattern
518  *   Flow pattern.
519  * @param[in, out] flow
520  *   Pointer to the rte_flow structure.
521  * @param[in] flow_size
522  *   Size in bytes of the available space in @p flow, if too small some
523  *   garbage may be present.
524  * @param[out] error
525  *   Pointer to error structure.
526  *
527  * @return
528  *   On success the number of bytes consumed/necessary, if the returned value
529  *   is lesser or equal to @p flow_size, the @p pattern has fully been
530  *   converted, otherwise another call with this returned memory size should
531  *   be done.
532  *   On error, a negative errno value is returned and rte_errno is set.
533  */
534 static int
535 mlx5_flow_items(const struct rte_flow_item pattern[],
536                 struct rte_flow *flow, const size_t flow_size,
537                 struct rte_flow_error *error)
538 {
539         int remain = flow_size;
540         size_t size = 0;
541
542         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
543                 int ret = 0;
544
545                 switch (pattern->type) {
546                 case RTE_FLOW_ITEM_TYPE_VOID:
547                         break;
548                 case RTE_FLOW_ITEM_TYPE_ETH:
549                         ret = mlx5_flow_item_eth(pattern, flow, remain, error);
550                         break;
551                 case RTE_FLOW_ITEM_TYPE_VLAN:
552                         ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
553                         break;
554                 default:
555                         return rte_flow_error_set(error, ENOTSUP,
556                                                   RTE_FLOW_ERROR_TYPE_ITEM,
557                                                   pattern,
558                                                   "item not supported");
559                 }
560                 if (ret < 0)
561                         return ret;
562                 if (remain > ret)
563                         remain -= ret;
564                 else
565                         remain = 0;
566                 size += ret;
567         }
568         if (!flow->layers) {
569                 const struct rte_flow_item item = {
570                         .type = RTE_FLOW_ITEM_TYPE_ETH,
571                 };
572
573                 return mlx5_flow_item_eth(&item, flow, flow_size, error);
574         }
575         return size;
576 }
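
/*
 * Note: when the pattern carries no item other than END, the loop above
 * leaves flow->layers empty and an implicit wildcard Ethernet item is
 * converted instead, so the resulting Verbs flow always holds at least one
 * specification. Sketch of such a catch-all pattern:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */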
577
578 /**
579  * Convert the @p action into a Verbs specification after ensuring the NIC
580  * will understand and process it correctly.
581  * If the necessary size for the conversion is greater than @p flow_size,
582  * nothing is written in @p flow; the validation is still performed.
583  *
584  * @param[in] action
585  *   Action configuration.
586  * @param[in, out] flow
587  *   Pointer to flow structure.
588  * @param[in] flow_size
589  *   Size in bytes of the available space in @p flow, if too small, nothing is
590  *   written.
591  * @param[out] error
592  *   Pointer to error structure.
593  *
594  * @return
595  *   On success the number of bytes consumed/necessary, if the returned value
596  *   is lesser or equal to @p flow_size, the @p action has fully been
597  *   converted, otherwise another call with this returned memory size should
598  *   be done.
599  *   On error, a negative errno value is returned and rte_errno is set.
600  */
601 static int
602 mlx5_flow_action_drop(const struct rte_flow_action *action,
603                       struct rte_flow *flow, const size_t flow_size,
604                       struct rte_flow_error *error)
605 {
606         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
607         struct ibv_flow_spec_action_drop drop = {
608                         .type = IBV_FLOW_SPEC_ACTION_DROP,
609                         .size = size,
610         };
611
612         if (flow->fate)
613                 return rte_flow_error_set(error, ENOTSUP,
614                                           RTE_FLOW_ERROR_TYPE_ACTION,
615                                           action,
616                                           "multiple fate actions are not"
617                                           " supported");
618         if (size <= flow_size)
619                 mlx5_flow_spec_verbs_add(flow, &drop, size);
620         flow->fate |= MLX5_FLOW_FATE_DROP;
621         return size;
622 }
623
624 /**
625  * Convert the @p action into @p flow after ensuring the NIC will understand
626  * and process it correctly.
627  *
628  * @param[in] dev
629  *   Pointer to Ethernet device structure.
630  * @param[in] action
631  *   Action configuration.
632  * @param[in, out] flow
633  *   Pointer to flow structure.
634  * @param[out] error
635  *   Pointer to error structure.
636  *
637  * @return
638  *   0 on success, a negative errno value otherwise and rte_errno is set.
639  */
640 static int
641 mlx5_flow_action_queue(struct rte_eth_dev *dev,
642                        const struct rte_flow_action *action,
643                        struct rte_flow *flow,
644                        struct rte_flow_error *error)
645 {
646         struct priv *priv = dev->data->dev_private;
647         const struct rte_flow_action_queue *queue = action->conf;
648
649         if (flow->fate)
650                 return rte_flow_error_set(error, ENOTSUP,
651                                           RTE_FLOW_ERROR_TYPE_ACTION,
652                                           action,
653                                           "multiple fate actions are not"
654                                           " supported");
655         if (queue->index >= priv->rxqs_n)
656                 return rte_flow_error_set(error, EINVAL,
657                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
658                                           &queue->index,
659                                           "queue index out of range");
660         if (!(*priv->rxqs)[queue->index])
661                 return rte_flow_error_set(error, EINVAL,
662                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
663                                           &queue->index,
664                                           "queue is not configured");
665         flow->queue = queue->index;
666         flow->fate |= MLX5_FLOW_FATE_QUEUE;
667         return 0;
668 }
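
/*
 * Example (sketch): a queue fate action accepted by the checks above,
 * provided Rx queue 3 exists and is configured on the port:
 *
 *	const struct rte_flow_action_queue queue = { .index = 3 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */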
669
670 /**
671  * Convert the @p actions into @p flow after ensuring the NIC will understand
672  * and process it correctly.
673  * The conversion is performed action per action, each of them is written into
674  * the @p flow if its size is lesser or equal to @p flow_size.
675  * Validation and memory consumption computation are still performed until the
676  * end of @p actions, unless an error is encountered.
677  *
678  * @param[in] dev
679  *   Pointer to Ethernet device structure.
680  * @param[in] actions
681  *   Pointer to flow actions array.
682  * @param[in, out] flow
683  *   Pointer to the rte_flow structure.
684  * @param[in] flow_size
685  *   Size in bytes of the available space in @p flow, if too small some
686  *   garbage may be present.
687  * @param[out] error
688  *   Pointer to error structure.
689  *
690  * @return
691  *   On success the number of bytes consumed/necessary, if the returned value
692  *   is lesser or equal to @p flow_size, the @p actions have fully been
693  *   converted, otherwise another call with this returned memory size should
694  *   be done.
695  *   On error, a negative errno value is returned and rte_errno is set.
696  */
697 static int
698 mlx5_flow_actions(struct rte_eth_dev *dev,
699                   const struct rte_flow_action actions[],
700                   struct rte_flow *flow, const size_t flow_size,
701                   struct rte_flow_error *error)
702 {
703         size_t size = 0;
704         int remain = flow_size;
705         int ret = 0;
706
707         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
708                 switch (actions->type) {
709                 case RTE_FLOW_ACTION_TYPE_VOID:
710                         break;
711                 case RTE_FLOW_ACTION_TYPE_DROP:
712                         ret = mlx5_flow_action_drop(actions, flow, remain,
713                                                     error);
714                         break;
715                 case RTE_FLOW_ACTION_TYPE_QUEUE:
716                         ret = mlx5_flow_action_queue(dev, actions, flow, error);
717                         break;
718                 default:
719                         return rte_flow_error_set(error, ENOTSUP,
720                                                   RTE_FLOW_ERROR_TYPE_ACTION,
721                                                   actions,
722                                                   "action not supported");
723                 }
724                 if (ret < 0)
725                         return ret;
726                 if (remain > ret)
727                         remain -= ret;
728                 else
729                         remain = 0;
730                 size += ret;
731         }
732         if (!flow->fate)
733                 return rte_flow_error_set(error, ENOTSUP,
734                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
735                                           NULL,
736                                           "no fate action found");
737         return size;
738 }
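
/*
 * Example (sketch): the simplest action list accepted by the loop above,
 * dropping every matching packet:
 *
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */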
739
740 /**
741  * Convert the @p attributes, @p pattern and @p actions into a flow for the
742  * NIC after ensuring the NIC will understand and process it correctly.
743  * The conversion is performed item per item and action per action, each of
744  * them is written into the @p flow if its size is lesser or equal to @p
745  * flow_size.
746  * Validation and memory consumption computation are still performed until the
747  * end, unless an error is encountered.
748  *
749  * @param[in] dev
750  *   Pointer to Ethernet device.
751  * @param[in, out] flow
752  *   Pointer to flow structure.
753  * @param[in] flow_size
754  *   Size in bytes of the available space in @p flow, if too small some
755  *   garbage may be present.
756  * @param[in] attributes
757  *   Flow rule attributes.
758  * @param[in] pattern
759  *   Pattern specification (list terminated by the END pattern item).
760  * @param[in] actions
761  *   Associated actions (list terminated by the END action).
762  * @param[out] error
763  *   Perform verbose error reporting if not NULL.
764  *
765  * @return
766  *   On success the number of bytes consumed/necessary, if the returned value
767  *   is lesser or equal to @p flow_size, the flow has fully been converted and
768  *   can be applied, otherwise another call with this returned memory size
769  *   should be done.
770  *   On error, a negative errno value is returned and rte_errno is set.
771  */
772 static int
773 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
774                 const size_t flow_size,
775                 const struct rte_flow_attr *attributes,
776                 const struct rte_flow_item pattern[],
777                 const struct rte_flow_action actions[],
778                 struct rte_flow_error *error)
779 {
780         struct rte_flow local_flow = { .layers = 0, };
781         size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
782         int remain = (flow_size > size) ? flow_size - size : 0;
783         int ret;
784
785         if (!remain)
786                 flow = &local_flow;
787         ret = mlx5_flow_attributes(dev, attributes, flow, error);
788         if (ret < 0)
789                 return ret;
790         ret = mlx5_flow_items(pattern, flow, remain, error);
791         if (ret < 0)
792                 return ret;
793         size += ret;
794         remain = (flow_size > size) ? flow_size - size : 0;
795         ret = mlx5_flow_actions(dev, actions, flow, remain, error);
796         if (ret < 0)
797                 return ret;
798         size += ret;
799         if (size <= flow_size)
800                 flow->verbs.attr->priority = flow->attributes.priority;
801         return size;
802 }
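
/*
 * Usage note (sketch): mlx5_flow_merge() is meant to be called twice,
 * exactly as mlx5_flow_list_create() does below. A first call with a
 * zero-sized buffer only validates and returns the required size, a
 * second call with an allocated buffer performs the actual conversion:
 *
 *	int size = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, err);
 *
 *	if (size < 0)
 *		return size;
 *	flow = rte_zmalloc(__func__, size, 0);
 *	size = mlx5_flow_merge(dev, flow, size, attr, items, actions, err);
 */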
803
804 /**
805  * Validate a flow supported by the NIC.
806  *
807  * @see rte_flow_validate()
808  * @see rte_flow_ops
809  */
810 int
811 mlx5_flow_validate(struct rte_eth_dev *dev,
812                    const struct rte_flow_attr *attr,
813                    const struct rte_flow_item items[],
814                    const struct rte_flow_action actions[],
815                    struct rte_flow_error *error)
816 {
817         int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
818
819         if (ret < 0)
820                 return ret;
821         return 0;
822 }
823
824 /**
825  * Remove the flow.
826  *
827  * @param[in] dev
828  *   Pointer to Ethernet device.
829  * @param[in, out] flow
830  *   Pointer to flow structure.
831  */
832 static void
833 mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
834 {
835         if (flow->fate & MLX5_FLOW_FATE_DROP) {
836                 if (flow->verbs.flow) {
837                         claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
838                         flow->verbs.flow = NULL;
839                 }
840         }
841         if (flow->verbs.hrxq) {
842                 if (flow->fate & MLX5_FLOW_FATE_DROP)
843                         mlx5_hrxq_drop_release(dev);
844                 else if (flow->fate & MLX5_FLOW_FATE_QUEUE)
845                         mlx5_hrxq_release(dev, flow->verbs.hrxq);
846                 flow->verbs.hrxq = NULL;
847         }
848 }
849
850 /**
851  * Apply the flow.
852  *
853  * @param[in] dev
854  *   Pointer to Ethernet device structure.
855  * @param[in, out] flow
856  *   Pointer to flow structure.
857  * @param[out] error
858  *   Pointer to error structure.
859  *
860  * @return
861  *   0 on success, a negative errno value otherwise and rte_errno is set.
862  */
863 static int
864 mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
865                 struct rte_flow_error *error)
866 {
867         if (flow->fate & MLX5_FLOW_FATE_DROP) {
868                 flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
869                 if (!flow->verbs.hrxq)
870                         return rte_flow_error_set
871                                 (error, errno,
872                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
873                                  NULL,
874                                  "cannot allocate Drop queue");
875         } else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {
876                 struct mlx5_hrxq *hrxq;
877
878                 hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
879                                      rss_hash_default_key_len, 0,
880                                      &flow->queue, 1, 0, 0);
881                 if (!hrxq)
882                         hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
883                                              rss_hash_default_key_len, 0,
884                                              &flow->queue, 1, 0, 0);
885                 if (!hrxq)
886                         return rte_flow_error_set(error, rte_errno,
887                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
888                                         NULL,
889                                         "cannot create flow");
890                 flow->verbs.hrxq = hrxq;
891         }
892         flow->verbs.flow =
893                 mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
894         if (!flow->verbs.flow) {
895                 if (flow->fate & MLX5_FLOW_FATE_DROP)
896                         mlx5_hrxq_drop_release(dev);
897                 else
898                         mlx5_hrxq_release(dev, flow->verbs.hrxq);
899                 flow->verbs.hrxq = NULL;
900                 return rte_flow_error_set(error, errno,
901                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
902                                           NULL,
903                                           "kernel module refuses to create"
904                                           " flow");
905         }
906         return 0;
907 }
908
909 /**
910  * Create a flow and add it to @p list.
911  *
912  * @param dev
913  *   Pointer to Ethernet device.
914  * @param list
915  *   Pointer to a TAILQ flow list.
916  * @param[in] attr
917  *   Flow rule attributes.
918  * @param[in] items
919  *   Pattern specification (list terminated by the END pattern item).
920  * @param[in] actions
921  *   Associated actions (list terminated by the END action).
922  * @param[out] error
923  *   Perform verbose error reporting if not NULL.
924  *
925  * @return
926  *   A flow on success, NULL otherwise and rte_errno is set.
927  */
928 static struct rte_flow *
929 mlx5_flow_list_create(struct rte_eth_dev *dev,
930                       struct mlx5_flows *list,
931                       const struct rte_flow_attr *attr,
932                       const struct rte_flow_item items[],
933                       const struct rte_flow_action actions[],
934                       struct rte_flow_error *error)
935 {
936         struct rte_flow *flow;
937         size_t size;
938         int ret;
939
940         ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
941         if (ret < 0)
942                 return NULL;
943         size = ret;
944         flow = rte_zmalloc(__func__, size, 0);
945         if (!flow) {
946                 rte_flow_error_set(error, ENOMEM,
947                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
948                                    NULL,
949                                    "cannot allocate memory");
950                 return NULL;
951         }
952         flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
953         flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
954         ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
955         if (ret < 0)
956                 goto error;
957         assert((size_t)ret == size);
958         if (dev->data->dev_started) {
959                 ret = mlx5_flow_apply(dev, flow, error);
960                 if (ret < 0)
961                         goto error;
962         }
963         TAILQ_INSERT_TAIL(list, flow, next);
964         return flow;
965 error:
966         ret = rte_errno; /* Save rte_errno before cleanup. */
967         mlx5_flow_remove(dev, flow);
968         rte_free(flow);
969         rte_errno = ret; /* Restore rte_errno. */
970         return NULL;
971 }
972
973 /**
974  * Create a flow.
975  *
976  * @see rte_flow_create()
977  * @see rte_flow_ops
978  */
979 struct rte_flow *
980 mlx5_flow_create(struct rte_eth_dev *dev,
981                  const struct rte_flow_attr *attr,
982                  const struct rte_flow_item items[],
983                  const struct rte_flow_action actions[],
984                  struct rte_flow_error *error)
985 {
986         return mlx5_flow_list_create
987                 (dev, &((struct priv *)dev->data->dev_private)->flows,
988                  attr, items, actions, error);
989 }
990
991 /**
992  * Destroy a flow in a list.
993  *
994  * @param dev
995  *   Pointer to Ethernet device.
996  * @param list
997  *   Pointer to a TAILQ flow list.
998  * @param[in] flow
999  *   Flow to destroy.
1000  */
1001 static void
1002 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
1003                        struct rte_flow *flow)
1004 {
1005         mlx5_flow_remove(dev, flow);
1006         TAILQ_REMOVE(list, flow, next);
1007         rte_free(flow);
1008 }
1009
1010 /**
1011  * Destroy all flows.
1012  *
1013  * @param dev
1014  *   Pointer to Ethernet device.
1015  * @param list
1016  *   Pointer to a TAILQ flow list.
1017  */
1018 void
1019 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
1020 {
1021         while (!TAILQ_EMPTY(list)) {
1022                 struct rte_flow *flow;
1023
1024                 flow = TAILQ_FIRST(list);
1025                 mlx5_flow_list_destroy(dev, list, flow);
1026         }
1027 }
1028
1029 /**
1030  * Remove all flows.
1031  *
1032  * @param dev
1033  *   Pointer to Ethernet device.
1034  * @param list
1035  *   Pointer to a TAILQ flow list.
1036  */
1037 void
1038 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
1039 {
1040         struct rte_flow *flow;
1041
1042         TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
1043                 mlx5_flow_remove(dev, flow);
1044 }
1045
1046 /**
1047  * Add all flows.
1048  *
1049  * @param dev
1050  *   Pointer to Ethernet device.
1051  * @param list
1052  *   Pointer to a TAILQ flow list.
1053  *
1054  * @return
1055  *   0 on success, a negative errno value otherwise and rte_errno is set.
1056  */
1057 int
1058 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
1059 {
1060         struct rte_flow *flow;
1061         struct rte_flow_error error;
1062         int ret = 0;
1063
1064         TAILQ_FOREACH(flow, list, next) {
1065                 ret = mlx5_flow_apply(dev, flow, &error);
1066                 if (ret < 0)
1067                         goto error;
1068         }
1069         return 0;
1070 error:
1071         ret = rte_errno; /* Save rte_errno before cleanup. */
1072         mlx5_flow_stop(dev, list);
1073         rte_errno = ret; /* Restore rte_errno. */
1074         return -rte_errno;
1075 }
1076
1077 /**
1078  * Verify the flow list is empty.
1079  *
1080  * @param dev
1081  *   Pointer to Ethernet device.
1082  *
1083  * @return the number of flows not released.
1084  */
1085 int
1086 mlx5_flow_verify(struct rte_eth_dev *dev)
1087 {
1088         struct priv *priv = dev->data->dev_private;
1089         struct rte_flow *flow;
1090         int ret = 0;
1091
1092         TAILQ_FOREACH(flow, &priv->flows, next) {
1093                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
1094                         dev->data->port_id, (void *)flow);
1095                 ++ret;
1096         }
1097         return ret;
1098 }
1099
1100 /**
1101  * Enable a control flow configured from the control plane.
1102  *
1103  * @param dev
1104  *   Pointer to Ethernet device.
1105  * @param eth_spec
1106  *   An Ethernet flow spec to apply.
1107  * @param eth_mask
1108  *   An Ethernet flow mask to apply.
1109  * @param vlan_spec
1110  *   A VLAN flow spec to apply.
1111  * @param vlan_mask
1112  *   A VLAN flow mask to apply.
1113  *
1114  * @return
1115  *   0 on success, a negative errno value otherwise and rte_errno is set.
1116  */
1117 int
1118 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
1119                     struct rte_flow_item_eth *eth_spec,
1120                     struct rte_flow_item_eth *eth_mask,
1121                     struct rte_flow_item_vlan *vlan_spec,
1122                     struct rte_flow_item_vlan *vlan_mask)
1123 {
1124         struct priv *priv = dev->data->dev_private;
1125         const struct rte_flow_attr attr = {
1126                 .ingress = 1,
1127                 .priority = priv->config.flow_prio - 1,
1128         };
1129         struct rte_flow_item items[] = {
1130                 {
1131                         .type = RTE_FLOW_ITEM_TYPE_ETH,
1132                         .spec = eth_spec,
1133                         .last = NULL,
1134                         .mask = eth_mask,
1135                 },
1136                 {
1137                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
1138                                 RTE_FLOW_ITEM_TYPE_END,
1139                         .spec = vlan_spec,
1140                         .last = NULL,
1141                         .mask = vlan_mask,
1142                 },
1143                 {
1144                         .type = RTE_FLOW_ITEM_TYPE_END,
1145                 },
1146         };
1147         uint16_t queue[priv->reta_idx_n];
1148         struct rte_flow_action_rss action_rss = {
1149                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
1150                 .level = 0,
1151                 .types = priv->rss_conf.rss_hf,
1152                 .key_len = priv->rss_conf.rss_key_len,
1153                 .queue_num = priv->reta_idx_n,
1154                 .key = priv->rss_conf.rss_key,
1155                 .queue = queue,
1156         };
1157         struct rte_flow_action actions[] = {
1158                 {
1159                         .type = RTE_FLOW_ACTION_TYPE_RSS,
1160                         .conf = &action_rss,
1161                 },
1162                 {
1163                         .type = RTE_FLOW_ACTION_TYPE_END,
1164                 },
1165         };
1166         struct rte_flow *flow;
1167         struct rte_flow_error error;
1168         unsigned int i;
1169
1170         if (!priv->reta_idx_n) {
1171                 rte_errno = EINVAL;
1172                 return -rte_errno;
1173         }
1174         for (i = 0; i != priv->reta_idx_n; ++i)
1175                 queue[i] = (*priv->reta_idx)[i];
1176         flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
1177                                      actions, &error);
1178         if (!flow)
1179                 return -rte_errno;
1180         return 0;
1181 }
1182
1183 /**
1184  * Enable a control flow configured from the control plane.
1185  *
1186  * @param dev
1187  *   Pointer to Ethernet device.
1188  * @param eth_spec
1189  *   An Ethernet flow spec to apply.
1190  * @param eth_mask
1191  *   An Ethernet flow mask to apply.
1192  *
1193  * @return
1194  *   0 on success, a negative errno value otherwise and rte_errno is set.
1195  */
1196 int
1197 mlx5_ctrl_flow(struct rte_eth_dev *dev,
1198                struct rte_flow_item_eth *eth_spec,
1199                struct rte_flow_item_eth *eth_mask)
1200 {
1201         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
1202 }
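
/*
 * Example (sketch): how the traffic-enable path typically uses this
 * wrapper, allowing broadcast frames by matching on the destination MAC
 * only. The same structure is passed as both spec and mask, restricting
 * the rule to the broadcast address:
 *
 *	struct rte_flow_item_eth bcast = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *	};
 *
 *	ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
 *	if (ret)
 *		return ret;
 */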
1203
1204 /**
1205  * Destroy a flow.
1206  *
1207  * @see rte_flow_destroy()
1208  * @see rte_flow_ops
1209  */
1210 int
1211 mlx5_flow_destroy(struct rte_eth_dev *dev,
1212                   struct rte_flow *flow,
1213                   struct rte_flow_error *error __rte_unused)
1214 {
1215         struct priv *priv = dev->data->dev_private;
1216
1217         mlx5_flow_list_destroy(dev, &priv->flows, flow);
1218         return 0;
1219 }
1220
1221 /**
1222  * Destroy all flows.
1223  *
1224  * @see rte_flow_flush()
1225  * @see rte_flow_ops
1226  */
1227 int
1228 mlx5_flow_flush(struct rte_eth_dev *dev,
1229                 struct rte_flow_error *error __rte_unused)
1230 {
1231         struct priv *priv = dev->data->dev_private;
1232
1233         mlx5_flow_list_flush(dev, &priv->flows);
1234         return 0;
1235 }
1236
1237 /**
1238  * Isolated mode.
1239  *
1240  * @see rte_flow_isolate()
1241  * @see rte_flow_ops
1242  */
1243 int
1244 mlx5_flow_isolate(struct rte_eth_dev *dev,
1245                   int enable,
1246                   struct rte_flow_error *error)
1247 {
1248         struct priv *priv = dev->data->dev_private;
1249
1250         if (dev->data->dev_started) {
1251                 rte_flow_error_set(error, EBUSY,
1252                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1253                                    NULL,
1254                                    "port must be stopped first");
1255                 return -rte_errno;
1256         }
1257         priv->isolated = !!enable;
1258         if (enable)
1259                 dev->dev_ops = &mlx5_dev_ops_isolate;
1260         else
1261                 dev->dev_ops = &mlx5_dev_ops;
1262         return 0;
1263 }
1264
1265 /**
1266  * Convert a flow director filter to a generic flow.
1267  *
1268  * @param dev
1269  *   Pointer to Ethernet device.
1270  * @param fdir_filter
1271  *   Flow director filter to add.
1272  * @param attributes
1273  *   Generic flow parameters structure.
1274  *
1275  * @return
1276  *   0 on success, a negative errno value otherwise and rte_errno is set.
1277  */
1278 static int
1279 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
1280                          const struct rte_eth_fdir_filter *fdir_filter,
1281                          struct mlx5_fdir *attributes)
1282 {
1283         struct priv *priv = dev->data->dev_private;
1284         const struct rte_eth_fdir_input *input = &fdir_filter->input;
1285         const struct rte_eth_fdir_masks *mask =
1286                 &dev->data->dev_conf.fdir_conf.mask;
1287
1288         /* Validate queue number. */
1289         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
1290                 DRV_LOG(ERR, "port %u invalid queue number %d",
1291                         dev->data->port_id, fdir_filter->action.rx_queue);
1292                 rte_errno = EINVAL;
1293                 return -rte_errno;
1294         }
1295         attributes->attr.ingress = 1;
1296         attributes->items[0] = (struct rte_flow_item) {
1297                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1298                 .spec = &attributes->l2,
1299                 .mask = &attributes->l2_mask,
1300         };
1301         switch (fdir_filter->action.behavior) {
1302         case RTE_ETH_FDIR_ACCEPT:
1303                 attributes->actions[0] = (struct rte_flow_action){
1304                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
1305                         .conf = &attributes->queue,
1306                 };
1307                 break;
1308         case RTE_ETH_FDIR_REJECT:
1309                 attributes->actions[0] = (struct rte_flow_action){
1310                         .type = RTE_FLOW_ACTION_TYPE_DROP,
1311                 };
1312                 break;
1313         default:
1314                 DRV_LOG(ERR, "port %u invalid behavior %d",
1315                         dev->data->port_id,
1316                         fdir_filter->action.behavior);
1317                 rte_errno = ENOTSUP;
1318                 return -rte_errno;
1319         }
1320         attributes->queue.index = fdir_filter->action.rx_queue;
1321         /* Handle L3. */
1322         switch (fdir_filter->input.flow_type) {
1323         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1324         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1325         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1326                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
1327                         .src_addr = input->flow.ip4_flow.src_ip,
1328                         .dst_addr = input->flow.ip4_flow.dst_ip,
1329                         .time_to_live = input->flow.ip4_flow.ttl,
1330                         .type_of_service = input->flow.ip4_flow.tos,
1331                         .next_proto_id = input->flow.ip4_flow.proto,
1332                 };
1333                 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
1334                         .src_addr = mask->ipv4_mask.src_ip,
1335                         .dst_addr = mask->ipv4_mask.dst_ip,
1336                         .time_to_live = mask->ipv4_mask.ttl,
1337                         .type_of_service = mask->ipv4_mask.tos,
1338                         .next_proto_id = mask->ipv4_mask.proto,
1339                 };
1340                 attributes->items[1] = (struct rte_flow_item){
1341                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
1342                         .spec = &attributes->l3,
1343                         .mask = &attributes->l3_mask,
1344                 };
1345                 break;
1346         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1347         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1348         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1349                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
1350                         .hop_limits = input->flow.ipv6_flow.hop_limits,
1351                         .proto = input->flow.ipv6_flow.proto,
1352                 };
1353
1354                 memcpy(attributes->l3.ipv6.hdr.src_addr,
1355                        input->flow.ipv6_flow.src_ip,
1356                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
1357                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
1358                        input->flow.ipv6_flow.dst_ip,
1359                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
1360                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
1361                        mask->ipv6_mask.src_ip,
1362                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
1363                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
1364                        mask->ipv6_mask.dst_ip,
1365                        RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
1366                 attributes->items[1] = (struct rte_flow_item){
1367                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
1368                         .spec = &attributes->l3,
1369                         .mask = &attributes->l3_mask,
1370                 };
1371                 break;
1372         default:
1373                 DRV_LOG(ERR, "port %u invalid flow type %d",
1374                         dev->data->port_id, fdir_filter->input.flow_type);
1375                 rte_errno = ENOTSUP;
1376                 return -rte_errno;
1377         }
1378         /* Handle L4. */
1379         switch (fdir_filter->input.flow_type) {
1380         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1381                 attributes->l4.udp.hdr = (struct udp_hdr){
1382                         .src_port = input->flow.udp4_flow.src_port,
1383                         .dst_port = input->flow.udp4_flow.dst_port,
1384                 };
1385                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1386                         .src_port = mask->src_port_mask,
1387                         .dst_port = mask->dst_port_mask,
1388                 };
1389                 attributes->items[2] = (struct rte_flow_item){
1390                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1391                         .spec = &attributes->l4,
1392                         .mask = &attributes->l4_mask,
1393                 };
1394                 break;
1395         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1396                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1397                         .src_port = input->flow.tcp4_flow.src_port,
1398                         .dst_port = input->flow.tcp4_flow.dst_port,
1399                 };
1400                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1401                         .src_port = mask->src_port_mask,
1402                         .dst_port = mask->dst_port_mask,
1403                 };
1404                 attributes->items[2] = (struct rte_flow_item){
1405                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1406                         .spec = &attributes->l4,
1407                         .mask = &attributes->l4_mask,
1408                 };
1409                 break;
1410         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1411                 attributes->l4.udp.hdr = (struct udp_hdr){
1412                         .src_port = input->flow.udp6_flow.src_port,
1413                         .dst_port = input->flow.udp6_flow.dst_port,
1414                 };
1415                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1416                         .src_port = mask->src_port_mask,
1417                         .dst_port = mask->dst_port_mask,
1418                 };
1419                 attributes->items[2] = (struct rte_flow_item){
1420                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1421                         .spec = &attributes->l4,
1422                         .mask = &attributes->l4_mask,
1423                 };
1424                 break;
1425         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1426                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1427                         .src_port = input->flow.tcp6_flow.src_port,
1428                         .dst_port = input->flow.tcp6_flow.dst_port,
1429                 };
1430                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1431                         .src_port = mask->src_port_mask,
1432                         .dst_port = mask->dst_port_mask,
1433                 };
1434                 attributes->items[2] = (struct rte_flow_item){
1435                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1436                         .spec = &attributes->l4,
1437                         .mask = &attributes->l4_mask,
1438                 };
1439                 break;
1440         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1441         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1442                 break;
1443         default:
1444                 DRV_LOG(ERR, "port %u invalid flow type %d",
1445                         dev->data->port_id, fdir_filter->input.flow_type);
1446                 rte_errno = ENOTSUP;
1447                 return -rte_errno;
1448         }
1449         return 0;
1450 }
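/*
 * Illustrative sketch, not part of the original sources: assuming the L2
 * item is filled in earlier in mlx5_fdir_filter_convert() and the structure
 * is zero-initialized by the caller, a RTE_ETH_FLOW_NONFRAG_IPV4_UDP
 * perfect filter is expected to leave attributes->items[] as:
 *
 *	items[0] = { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		     .spec = &l2, .mask = &l2_mask };
 *	items[1] = { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		     .spec = &l3, .mask = &l3_mask };
 *	items[2] = { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		     .spec = &l4, .mask = &l4_mask };
 *	items[3] = { 0 };	(implicitly RTE_FLOW_ITEM_TYPE_END)
 *
 * This pattern is then passed verbatim to mlx5_flow_list_create() by
 * mlx5_fdir_filter_add() below.
 */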
1451
1452 /**
1453  * Add new flow director filter and store it in list.
1454  *
1455  * @param dev
1456  *   Pointer to Ethernet device.
1457  * @param fdir_filter
1458  *   Flow director filter to add.
1459  *
1460  * @return
1461  *   0 on success, a negative errno value otherwise and rte_errno is set.
1462  */
1463 static int
1464 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
1465                      const struct rte_eth_fdir_filter *fdir_filter)
1466 {
1467         struct priv *priv = dev->data->dev_private;
1468         struct mlx5_fdir attributes = {
1469                 .attr.group = 0,
1470                 .l2_mask = {
1471                         .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1472                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1473                         .type = 0,
1474                 },
1475         };
1476         struct rte_flow_error error;
1477         struct rte_flow *flow;
1478         int ret;
1479
1480         ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
1481         if (ret)
1482                 return ret;
1483         flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
1484                                      attributes.items, attributes.actions,
1485                                      &error);
1486         if (flow) {
1487                 DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
1488                         (void *)flow);
1489                 return 0;
1490         }
1491         return -rte_errno;
1492 }
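/*
 * Illustrative sketch, not part of the original sources: this function is
 * normally reached through the legacy filter API. Assuming "port_id" is a
 * started mlx5 port configured in RTE_FDIR_MODE_PERFECT, a minimal request
 * steering IPv4/UDP traffic with destination port 4789 to Rx queue 1 could
 * look like (values in network byte order, as expected by the conversion
 * above):
 *
 *	struct rte_eth_fdir_filter filter = {
 *		.soft_id = 1,
 *		.input = {
 *			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *			.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789),
 *		},
 *		.action = {
 *			.rx_queue = 1,
 *			.behavior = RTE_ETH_FDIR_ACCEPT,
 *		},
 *	};
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *					  RTE_ETH_FILTER_ADD, &filter);
 */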
1493
1494 /**
1495  * Delete specific filter.
1496  *
1497  * @param dev
1498  *   Pointer to Ethernet device.
1499  * @param fdir_filter
1500  *   Filter to be deleted.
1501  *
1502  * @return
1503  *   0 on success, a negative errno value otherwise and rte_errno is set.
1504  */
1505 static int
1506 mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
1507                         const struct rte_eth_fdir_filter *fdir_filter
1508                         __rte_unused)
1509 {
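	/*
	 * Note added for clarity: removing an individual flow director
	 * filter is not implemented by this driver; only a full flush
	 * through mlx5_fdir_filter_flush() is available, hence the
	 * unconditional -ENOTSUP below.
	 */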
1510         rte_errno = ENOTSUP;
1511         return -rte_errno;
1512 }
1513
1514 /**
1515  * Update queue for specific filter.
1516  *
1517  * @param dev
1518  *   Pointer to Ethernet device.
1519  * @param fdir_filter
1520  *   Filter to be updated.
1521  *
1522  * @return
1523  *   0 on success, a negative errno value otherwise and rte_errno is set.
1524  */
1525 static int
1526 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
1527                         const struct rte_eth_fdir_filter *fdir_filter)
1528 {
1529         int ret;
1530
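	/*
	 * Note added for clarity: as mlx5_fdir_filter_delete() currently
	 * returns -ENOTSUP, this update path fails before the replacement
	 * filter is added.
	 */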
1531         ret = mlx5_fdir_filter_delete(dev, fdir_filter);
1532         if (ret)
1533                 return ret;
1534         return mlx5_fdir_filter_add(dev, fdir_filter);
1535 }
1536
1537 /**
1538  * Flush all filters.
1539  *
1540  * @param dev
1541  *   Pointer to Ethernet device.
1542  */
1543 static void
1544 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
1545 {
1546         struct priv *priv = dev->data->dev_private;
1547
1548         mlx5_flow_list_flush(dev, &priv->flows);
1549 }
1550
1551 /**
1552  * Get flow director information.
1553  *
1554  * @param dev
1555  *   Pointer to Ethernet device.
1556  * @param[out] fdir_info
1557  *   Resulting flow director information.
1558  */
1559 static void
1560 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
1561 {
1562         struct rte_eth_fdir_masks *mask =
1563                 &dev->data->dev_conf.fdir_conf.mask;
1564
1565         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
1566         fdir_info->guarant_spc = 0;
1567         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
1568         fdir_info->max_flexpayload = 0;
1569         fdir_info->flow_types_mask[0] = 0;
1570         fdir_info->flex_payload_unit = 0;
1571         fdir_info->max_flex_payload_segment_num = 0;
1572         fdir_info->flex_payload_limit = 0;
1573         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
1574 }
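/*
 * Illustrative sketch, not part of the original sources: the information
 * filled in above is retrieved by applications through the same legacy
 * filter API, e.g.:
 *
 *	struct rte_eth_fdir_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				    RTE_ETH_FILTER_INFO, &info) == 0)
 *		printf("FDIR mode %d\n", info.mode);
 *
 * Only the mode and the global masks carry information here; flexible
 * payload parameters and the guaranteed space counter are reported as zero.
 */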
1575
1576 /**
1577  * Deal with flow director operations.
1578  *
1579  * @param dev
1580  *   Pointer to Ethernet device.
1581  * @param filter_op
1582  *   Operation to perform.
1583  * @param arg
1584  *   Pointer to operation-specific structure.
1585  *
1586  * @return
1587  *   0 on success, a negative errno value otherwise and rte_errno is set.
1588  */
1589 static int
1590 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1591                     void *arg)
1592 {
1593         enum rte_fdir_mode fdir_mode =
1594                 dev->data->dev_conf.fdir_conf.mode;
1595
1596         if (filter_op == RTE_ETH_FILTER_NOP)
1597                 return 0;
1598         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
1599             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1600                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
1601                         dev->data->port_id, fdir_mode);
1602                 rte_errno = EINVAL;
1603                 return -rte_errno;
1604         }
1605         switch (filter_op) {
1606         case RTE_ETH_FILTER_ADD:
1607                 return mlx5_fdir_filter_add(dev, arg);
1608         case RTE_ETH_FILTER_UPDATE:
1609                 return mlx5_fdir_filter_update(dev, arg);
1610         case RTE_ETH_FILTER_DELETE:
1611                 return mlx5_fdir_filter_delete(dev, arg);
1612         case RTE_ETH_FILTER_FLUSH:
1613                 mlx5_fdir_filter_flush(dev);
1614                 break;
1615         case RTE_ETH_FILTER_INFO:
1616                 mlx5_fdir_info_get(dev, arg);
1617                 break;
1618         default:
1619                 DRV_LOG(DEBUG, "port %u unknown operation %u",
1620                         dev->data->port_id, filter_op);
1621                 rte_errno = EINVAL;
1622                 return -rte_errno;
1623         }
1624         return 0;
1625 }
1626
1627 /**
1628  * Manage filter operations.
1629  *
1630  * @param dev
1631  *   Pointer to Ethernet device structure.
1632  * @param filter_type
1633  *   Filter type.
1634  * @param filter_op
1635  *   Operation to perform.
1636  * @param arg
1637  *   Pointer to operation-specific structure.
1638  *
1639  * @return
1640  *   0 on success, a negative errno value otherwise and rte_errno is set.
1641  */
1642 int
1643 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1644                      enum rte_filter_type filter_type,
1645                      enum rte_filter_op filter_op,
1646                      void *arg)
1647 {
1648         switch (filter_type) {
1649         case RTE_ETH_FILTER_GENERIC:
1650                 if (filter_op != RTE_ETH_FILTER_GET) {
1651                         rte_errno = EINVAL;
1652                         return -rte_errno;
1653                 }
1654                 *(const void **)arg = &mlx5_flow_ops;
1655                 return 0;
1656         case RTE_ETH_FILTER_FDIR:
1657                 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
1658         default:
1659                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
1660                         dev->data->port_id, filter_type);
1661                 rte_errno = ENOTSUP;
1662                 return -rte_errno;
1663         }
1664         return 0;
1665 }
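/*
 * Illustrative sketch, not part of the original sources: the
 * RTE_ETH_FILTER_GENERIC branch above is how the rte_flow layer discovers
 * the driver callbacks. A lookup equivalent to what librte_ethdev performs
 * internally would be:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				    RTE_ETH_FILTER_GET, &ops) == 0 && ops)
 *		ops->validate(dev, attr, pattern, actions, error);
 *
 * where "dev", "attr", "pattern", "actions" and "error" stand for the
 * usual rte_flow_validate() arguments.
 */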