net/mlx5: add flow UDP item
[dpdk.git] drivers/net/mlx5/mlx5_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5
6 #include <sys/queue.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_prm.h"
32 #include "mlx5_glue.h"
33
34 /* Dev ops structures defined in mlx5.c */
35 extern const struct eth_dev_ops mlx5_dev_ops;
36 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
37
38 /* Pattern Layer bits. */
39 #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
40 #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
41 #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
42 #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
43 #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
44 #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
45 /* Masks. */
46 #define MLX5_FLOW_LAYER_OUTER_L3 \
47         (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
48 #define MLX5_FLOW_LAYER_OUTER_L4 \
49         (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
50
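/*
 * Illustrative note: the layer bits above accumulate while a pattern is
 * parsed, so a flow matching eth / ipv4 / udp ends up with
 * layers == (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_UDP), and a test such as
 * (layers & MLX5_FLOW_LAYER_OUTER_L3) tells whether an outer L3 layer is
 * already present.
 */
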
51 /* Actions that modify the fate of matching traffic. */
52 #define MLX5_FLOW_FATE_DROP (1u << 0)
53 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
54
55 /* Possible L3 protocols to filter on. */
56 #define MLX5_IP_PROTOCOL_UDP 17
57
58 /** Verbs flow structure, holds the specifications and handles. */
59 struct mlx5_flow_verbs {
60         unsigned int size; /**< Total size of the specifications. */
61         struct {
62                 struct ibv_flow_attr *attr;
63                 /**< Pointer to the Verbs flow attributes. */
64                 uint8_t *specs; /**< Pointer to the specifications buffer. */
65         };
66         struct ibv_flow *flow; /**< Verbs flow pointer. */
67         struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
68 };
69
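/*
 * Layout sketch (mirrors mlx5_flow_list_create() below): the rte_flow, the
 * Verbs attributes and the specifications live in a single allocation, back
 * to back:
 *
 *	flow->verbs.attr  = (struct ibv_flow_attr *)(flow + 1);
 *	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
 *
 * Each converted item/action is then appended at specs + verbs.size.
 */
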
70 /* Flow structure. */
71 struct rte_flow {
72         TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
73         struct rte_flow_attr attributes; /**< User flow attribute. */
74         uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
75         uint32_t layers;
76         /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
77         uint32_t fate;
78         /**< Bit-fields of present fate actions, see MLX5_FLOW_FATE_*. */
79         uint8_t l3_protocol; /**< Valid when l3_protocol_en is set. */
80         struct mlx5_flow_verbs verbs; /**< Verbs flow. */
81         uint16_t queue; /**< Destination queue to redirect traffic to. */
82 };
83
84 static const struct rte_flow_ops mlx5_flow_ops = {
85         .validate = mlx5_flow_validate,
86         .create = mlx5_flow_create,
87         .destroy = mlx5_flow_destroy,
88         .flush = mlx5_flow_flush,
89         .isolate = mlx5_flow_isolate,
90 };
91
92 /* Convert an FDIR request into a generic flow. */
93 struct mlx5_fdir {
94         struct rte_flow_attr attr;
95         struct rte_flow_action actions[2];
96         struct rte_flow_item items[4];
97         struct rte_flow_item_eth l2;
98         struct rte_flow_item_eth l2_mask;
99         union {
100                 struct rte_flow_item_ipv4 ipv4;
101                 struct rte_flow_item_ipv6 ipv6;
102         } l3;
103         union {
104                 struct rte_flow_item_ipv4 ipv4;
105                 struct rte_flow_item_ipv6 ipv6;
106         } l3_mask;
107         union {
108                 struct rte_flow_item_udp udp;
109                 struct rte_flow_item_tcp tcp;
110         } l4;
111         union {
112                 struct rte_flow_item_udp udp;
113                 struct rte_flow_item_tcp tcp;
114         } l4_mask;
115         struct rte_flow_action_queue queue;
116 };
117
118 /* Verbs specification header. */
119 struct ibv_spec_header {
120         enum ibv_flow_spec_type type;
121         uint16_t size;
122 };
123
124 /**
125  * Discover the maximum number of flow priorities available.
126  *
127  * @param[in] dev
128  *   Pointer to Ethernet device.
129  *
130  * @return
131  *   Number of supported flow priorities on success, a negative errno value
132  *   otherwise and rte_errno is set.
133  */
134 int
135 mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
136 {
137         struct {
138                 struct ibv_flow_attr attr;
139                 struct ibv_flow_spec_eth eth;
140                 struct ibv_flow_spec_action_drop drop;
141         } flow_attr = {
142                 .attr = {
143                         .num_of_specs = 2,
144                 },
145                 .eth = {
146                         .type = IBV_FLOW_SPEC_ETH,
147                         .size = sizeof(struct ibv_flow_spec_eth),
148                 },
149                 .drop = {
150                         .size = sizeof(struct ibv_flow_spec_action_drop),
151                         .type = IBV_FLOW_SPEC_ACTION_DROP,
152                 },
153         };
154         struct ibv_flow *flow;
155         struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
156         uint16_t vprio[] = { 8, 16 };
157         int i;
158
159         if (!drop) {
160                 rte_errno = ENOTSUP;
161                 return -rte_errno;
162         }
163         for (i = 0; i != RTE_DIM(vprio); i++) {
164                 flow_attr.attr.priority = vprio[i] - 1;
165                 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
166                 if (!flow)
167                         break;
168                 claim_zero(mlx5_glue->destroy_flow(flow));
169         }
170         mlx5_hrxq_drop_release(dev);
            if (!i) {
                    /* No priority probe succeeded, vprio[i - 1] is invalid. */
                    rte_errno = ENOTSUP;
                    return -rte_errno;
            }
171         DRV_LOG(INFO, "port %u flow maximum priority: %d",
172                 dev->data->port_id, vprio[i - 1]);
173         return vprio[i - 1];
174 }
175
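/*
 * Probing rationale (informative): Verbs flow priorities go from 0 to 7 or
 * from 0 to 15 depending on the kernel, hence vprio[] = { 8, 16 } and the
 * probe at priority vprio[i] - 1; the last successful dummy drop flow tells
 * how many priority levels are usable.
 */
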
176 /**
177  * Verify the @p attributes will be correctly understood by the NIC and store
178  * them in the @p flow if everything is correct.
179  *
180  * @param[in] dev
181  *   Pointer to Ethernet device.
182  * @param[in] attributes
183  *   Pointer to flow attributes.
184  * @param[in, out] flow
185  *   Pointer to the rte_flow structure.
186  * @param[out] error
187  *   Pointer to error structure.
188  *
189  * @return
190  *   0 on success, a negative errno value otherwise and rte_errno is set.
191  */
192 static int
193 mlx5_flow_attributes(struct rte_eth_dev *dev,
194                      const struct rte_flow_attr *attributes,
195                      struct rte_flow *flow,
196                      struct rte_flow_error *error)
197 {
198         uint32_t priority_max =
199                 ((struct priv *)dev->data->dev_private)->config.flow_prio;
200
201         if (attributes->group)
202                 return rte_flow_error_set(error, ENOTSUP,
203                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
204                                           NULL,
205                                           "groups are not supported");
206         if (attributes->priority >= priority_max)
207                 return rte_flow_error_set(error, ENOTSUP,
208                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
209                                           NULL,
210                                           "priority out of range");
211         if (attributes->egress)
212                 return rte_flow_error_set(error, ENOTSUP,
213                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
214                                           NULL,
215                                           "egress is not supported");
216         if (attributes->transfer)
217                 return rte_flow_error_set(error, ENOTSUP,
218                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
219                                           NULL,
220                                           "transfer is not supported");
221         if (!attributes->ingress)
222                 return rte_flow_error_set(error, ENOTSUP,
223                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
224                                           NULL,
225                                           "ingress attribute is mandatory");
226         flow->attributes = *attributes;
227         return 0;
228 }
229
230 /**
231  * Verify the @p item specifications (spec, last, mask) are compatible with the
232  * NIC capabilities.
233  *
234  * @param[in] item
235  *   Item specification.
236  * @param[in] mask
237  *   @p item->mask or flow default bit-masks.
238  * @param[in] nic_mask
239  *   Bit-masks covering supported fields by the NIC to compare with user mask.
240  * @param[in] size
241  *   Bit-masks size in bytes.
242  * @param[out] error
243  *   Pointer to error structure.
244  *
245  * @return
246  *   0 on success, a negative errno value otherwise and rte_errno is set.
247  */
248 static int
249 mlx5_flow_item_acceptable(const struct rte_flow_item *item,
250                           const uint8_t *mask,
251                           const uint8_t *nic_mask,
252                           unsigned int size,
253                           struct rte_flow_error *error)
254 {
255         unsigned int i;
256
257         assert(nic_mask);
258         for (i = 0; i < size; ++i)
259                 if ((nic_mask[i] | mask[i]) != nic_mask[i])
260                         return rte_flow_error_set(error, ENOTSUP,
261                                                   RTE_FLOW_ERROR_TYPE_ITEM,
262                                                   item,
263                                                   "mask enables unsupported"
264                                                   " bits");
265         if (!item->spec && (item->mask || item->last))
266                 return rte_flow_error_set(error, EINVAL,
267                                           RTE_FLOW_ERROR_TYPE_ITEM,
268                                           item,
269                                           "mask/last without a spec is not"
270                                           " supported");
271         if (item->spec && item->last) {
272                 uint8_t spec[size];
273                 uint8_t last[size];
274                 unsigned int i;
275                 int ret;
276
277                 for (i = 0; i < size; ++i) {
278                         spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
279                         last[i] = ((const uint8_t *)item->last)[i] & mask[i];
280                 }
281                 ret = memcmp(spec, last, size);
282                 if (ret != 0)
283                         return rte_flow_error_set(error, ENOTSUP,
284                                                   RTE_FLOW_ERROR_TYPE_ITEM,
285                                                   item,
286                                                   "range is not supported");
287         }
288         return 0;
289 }
290
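/*
 * Example (illustrative): the IPv4 nic_mask used further below does not
 * cover hdr.time_to_live, so an item mask requesting a TTL match fails the
 * (nic_mask[i] | mask[i]) != nic_mask[i] test above with "mask enables
 * unsupported bits".  A spec/last pair is only accepted when both compare
 * equal once masked, i.e. ranges are not supported.
 */
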
291 /**
292  * Add a Verbs specification into @p flow.
293  *
294  * @param[in, out] flow
295  *   Pointer to flow structure.
296  * @param[in] src
297  *   Specification to copy.
298  * @param[in] size
299  *   Size in bytes of the specification to copy.
300  */
301 static void
302 mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
303 {
304         if (flow->verbs.specs) {
305                 void *dst;
306
307                 dst = (void *)(flow->verbs.specs + flow->verbs.size);
308                 memcpy(dst, src, size);
309                 ++flow->verbs.attr->num_of_specs;
310         }
311         flow->verbs.size += size;
312 }
313
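/*
 * Note: when flow->verbs.specs is NULL (measurement pass, e.g. the first
 * mlx5_flow_merge() call made with a zero-sized buffer), only verbs.size
 * grows; nothing is copied and num_of_specs is left untouched.  A second
 * call on a properly sized buffer performs the actual copy.
 */
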
314 /**
315  * Convert the @p item into a Verbs specification after ensuring the NIC
316  * will understand and process it correctly.
317  * If the necessary size for the conversion is greater than the @p flow_size,
318  * nothing is written in @p flow; the validation is still performed.
319  *
320  * @param[in] item
321  *   Item specification.
322  * @param[in, out] flow
323  *   Pointer to flow structure.
324  * @param[in] flow_size
325  *   Size in bytes of the available space in @p flow, if too small, nothing is
326  *   written.
327  * @param[out] error
328  *   Pointer to error structure.
329  *
330  * @return
331  *   On success the number of bytes consumed/necessary, if the returned value
332  *   is less than or equal to @p flow_size, the @p item has fully been converted,
333  *   otherwise another call with this returned memory size should be done.
334  *   On error, a negative errno value is returned and rte_errno is set.
335  */
336 static int
337 mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
338                    const size_t flow_size, struct rte_flow_error *error)
339 {
340         const struct rte_flow_item_eth *spec = item->spec;
341         const struct rte_flow_item_eth *mask = item->mask;
342         const struct rte_flow_item_eth nic_mask = {
343                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
344                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
345                 .type = RTE_BE16(0xffff),
346         };
347         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
348         struct ibv_flow_spec_eth eth = {
349                 .type = IBV_FLOW_SPEC_ETH,
350                 .size = size,
351         };
352         int ret;
353
354         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)
355                 return rte_flow_error_set(error, ENOTSUP,
356                                           RTE_FLOW_ERROR_TYPE_ITEM,
357                                           item,
358                                           "L2 layers already configured");
359         if (!mask)
360                 mask = &rte_flow_item_eth_mask;
361         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
362                                         (const uint8_t *)&nic_mask,
363                                         sizeof(struct rte_flow_item_eth),
364                                         error);
365         if (ret)
366                 return ret;
367         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2;
368         if (size > flow_size)
369                 return size;
370         if (spec) {
371                 unsigned int i;
372
373                 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
374                 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
375                 eth.val.ether_type = spec->type;
376                 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
377                 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
378                 eth.mask.ether_type = mask->type;
379                 /* Remove unwanted bits from values. */
380                 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
381                         eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
382                         eth.val.src_mac[i] &= eth.mask.src_mac[i];
383                 }
384                 eth.val.ether_type &= eth.mask.ether_type;
385         }
386         mlx5_flow_spec_verbs_add(flow, &eth, size);
387         return size;
388 }
389
390 /**
391  * Update the VLAN tag in the Verbs Ethernet specification.
392  *
393  * @param[in, out] attr
394  *   Pointer to Verbs attributes structure.
395  * @param[in] eth
396  *   Verbs structure containing the VLAN information to copy.
397  */
398 static void
399 mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
400                            struct ibv_flow_spec_eth *eth)
401 {
402         unsigned int i;
403         enum ibv_flow_spec_type search = IBV_FLOW_SPEC_ETH;
404         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
405                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
406
407         for (i = 0; i != attr->num_of_specs; ++i) {
408                 if (hdr->type == search) {
409                         struct ibv_flow_spec_eth *e =
410                                 (struct ibv_flow_spec_eth *)hdr;
411
412                         e->val.vlan_tag = eth->val.vlan_tag;
413                         e->mask.vlan_tag = eth->mask.vlan_tag;
414                         e->val.ether_type = eth->val.ether_type;
415                         e->mask.ether_type = eth->mask.ether_type;
416                         break;
417                 }
418                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
419         }
420 }
421
422 /**
423  * Convert the @p item into @p flow (or by updating the already present
424  * Ethernet Verbs) specification after ensuring the NIC will understand and
425  * process it correctly.
426  * If the necessary size for the conversion is greater than the @p flow_size,
427  * nothing is written in @p flow; the validation is still performed.
428  *
429  * @param[in] item
430  *   Item specification.
431  * @param[in, out] flow
432  *   Pointer to flow structure.
433  * @param[in] flow_size
434  *   Size in bytes of the available space in @p flow, if too small, nothing is
435  *   written.
436  * @param[out] error
437  *   Pointer to error structure.
438  *
439  * @return
440  *   On success the number of bytes consumed/necessary, if the returned value
441  *   is less than or equal to @p flow_size, the @p item has fully been converted,
442  *   otherwise another call with this returned memory size should be done.
443  *   On error, a negative errno value is returned and rte_errno is set.
444  */
445 static int
446 mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
447                     const size_t flow_size, struct rte_flow_error *error)
448 {
449         const struct rte_flow_item_vlan *spec = item->spec;
450         const struct rte_flow_item_vlan *mask = item->mask;
451         const struct rte_flow_item_vlan nic_mask = {
452                 .tci = RTE_BE16(0x0fff),
453                 .inner_type = RTE_BE16(0xffff),
454         };
455         unsigned int size = sizeof(struct ibv_flow_spec_eth);
456         struct ibv_flow_spec_eth eth = {
457                 .type = IBV_FLOW_SPEC_ETH,
458                 .size = size,
459         };
460         int ret;
461         const uint32_t l34m = MLX5_FLOW_LAYER_OUTER_L3 |
462                         MLX5_FLOW_LAYER_OUTER_L4;
463         const uint32_t vlanm = MLX5_FLOW_LAYER_OUTER_VLAN;
464         const uint32_t l2m = MLX5_FLOW_LAYER_OUTER_L2;
465
466         if (flow->layers & vlanm)
467                 return rte_flow_error_set(error, ENOTSUP,
468                                           RTE_FLOW_ERROR_TYPE_ITEM,
469                                           item,
470                                           "VLAN layer already configured");
471         else if ((flow->layers & l34m) != 0)
472                 return rte_flow_error_set(error, ENOTSUP,
473                                           RTE_FLOW_ERROR_TYPE_ITEM,
474                                           item,
475                                           "L2 layer cannot follow L3/L4 layer");
476         if (!mask)
477                 mask = &rte_flow_item_vlan_mask;
478         ret = mlx5_flow_item_acceptable
479                 (item, (const uint8_t *)mask,
480                  (const uint8_t *)&nic_mask,
481                  sizeof(struct rte_flow_item_vlan), error);
482         if (ret)
483                 return ret;
484         if (spec) {
485                 eth.val.vlan_tag = spec->tci;
486                 eth.mask.vlan_tag = mask->tci;
487                 eth.val.vlan_tag &= eth.mask.vlan_tag;
488                 eth.val.ether_type = spec->inner_type;
489                 eth.mask.ether_type = mask->inner_type;
490                 eth.val.ether_type &= eth.mask.ether_type;
491         }
492         /*
493          * From a Verbs perspective, an empty VLAN is equivalent
494          * to a packet without a VLAN layer.
495          */
496         if (!eth.mask.vlan_tag)
497                 return rte_flow_error_set(error, EINVAL,
498                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
499                                           item->spec,
500                                           "VLAN cannot be empty");
501         if (!(flow->layers & l2m)) {
502                 if (size <= flow_size)
503                         mlx5_flow_spec_verbs_add(flow, &eth, size);
504         } else {
505                 if (flow->verbs.attr)
506                         mlx5_flow_item_vlan_update(flow->verbs.attr, &eth);
507                 size = 0; /* Only an update is done in eth specification. */
508         }
509         flow->layers |= MLX5_FLOW_LAYER_OUTER_L2 |
510                 MLX5_FLOW_LAYER_OUTER_VLAN;
511         return size;
512 }
513
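/*
 * Example (illustrative): for a pattern such as eth / vlan, the VLAN item
 * does not emit a second Verbs spec; mlx5_flow_item_vlan_update() patches
 * vlan_tag and ether_type into the ibv_flow_spec_eth already present in the
 * attribute buffer, which is why the function can return 0 bytes consumed.
 */
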
514 /**
515  * Convert the @p item into a Verbs specification after ensuring the NIC
516  * will understand and process it correctly.
517  * If the necessary size for the conversion is greater than the @p flow_size,
518  * nothing is written in @p flow; the validation is still performed.
519  *
520  * @param[in] item
521  *   Item specification.
522  * @param[in, out] flow
523  *   Pointer to flow structure.
524  * @param[in] flow_size
525  *   Size in bytes of the available space in @p flow, if too small, nothing is
526  *   written.
527  * @param[out] error
528  *   Pointer to error structure.
529  *
530  * @return
531  *   On success the number of bytes consumed/necessary, if the returned value
532  *   is less than or equal to @p flow_size, the @p item has fully been converted,
533  *   otherwise another call with this returned memory size should be done.
534  *   On error, a negative errno value is returned and rte_errno is set.
535  */
536 static int
537 mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
538                     const size_t flow_size, struct rte_flow_error *error)
539 {
540         const struct rte_flow_item_ipv4 *spec = item->spec;
541         const struct rte_flow_item_ipv4 *mask = item->mask;
542         const struct rte_flow_item_ipv4 nic_mask = {
543                 .hdr = {
544                         .src_addr = RTE_BE32(0xffffffff),
545                         .dst_addr = RTE_BE32(0xffffffff),
546                         .type_of_service = 0xff,
547                         .next_proto_id = 0xff,
548                 },
549         };
550         unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
551         struct ibv_flow_spec_ipv4_ext ipv4 = {
552                 .type = IBV_FLOW_SPEC_IPV4_EXT,
553                 .size = size,
554         };
555         int ret;
556
557         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
558                 return rte_flow_error_set(error, ENOTSUP,
559                                           RTE_FLOW_ERROR_TYPE_ITEM,
560                                           item,
561                                           "multiple L3 layers not supported");
562         else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
563                 return rte_flow_error_set(error, ENOTSUP,
564                                           RTE_FLOW_ERROR_TYPE_ITEM,
565                                           item,
566                                           "L3 cannot follow an L4 layer");
567         if (!mask)
568                 mask = &rte_flow_item_ipv4_mask;
569         ret = mlx5_flow_item_acceptable
570                 (item, (const uint8_t *)mask,
571                  (const uint8_t *)&nic_mask,
572                  sizeof(struct rte_flow_item_ipv4), error);
573         if (ret < 0)
574                 return ret;
575         flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
576         if (spec) {
577                 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
578                         .src_ip = spec->hdr.src_addr,
579                         .dst_ip = spec->hdr.dst_addr,
580                         .proto = spec->hdr.next_proto_id,
581                         .tos = spec->hdr.type_of_service,
582                 };
583                 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
584                         .src_ip = mask->hdr.src_addr,
585                         .dst_ip = mask->hdr.dst_addr,
586                         .proto = mask->hdr.next_proto_id,
587                         .tos = mask->hdr.type_of_service,
588                 };
589                 /* Remove unwanted bits from values. */
590                 ipv4.val.src_ip &= ipv4.mask.src_ip;
591                 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
592                 ipv4.val.proto &= ipv4.mask.proto;
593                 ipv4.val.tos &= ipv4.mask.tos;
594         }
595         flow->l3_protocol_en = !!ipv4.mask.proto;
596         flow->l3_protocol = ipv4.val.proto;
597         if (size <= flow_size)
598                 mlx5_flow_spec_verbs_add(flow, &ipv4, size);
599         return size;
600 }
601
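/*
 * Example (illustrative): an IPv4 item with next_proto_id = 6 (TCP) and a
 * 0xff protocol mask sets l3_protocol_en/l3_protocol above, so a UDP item
 * appended to the same pattern is later rejected by mlx5_flow_item_udp()
 * with "protocol filtering not compatible with UDP layer".
 */
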
602 /**
603  * Convert the @p item into a Verbs specification after ensuring the NIC
604  * will understand and process it correctly.
605  * If the necessary size for the conversion is greater than the @p flow_size,
606  * nothing is written in @p flow; the validation is still performed.
607  *
608  * @param[in] item
609  *   Item specification.
610  * @param[in, out] flow
611  *   Pointer to flow structure.
612  * @param[in] flow_size
613  *   Size in bytes of the available space in @p flow, if too small, nothing is
614  *   written.
615  * @param[out] error
616  *   Pointer to error structure.
617  *
618  * @return
619  *   On success the number of bytes consumed/necessary, if the returned value
620  *   is less than or equal to @p flow_size, the @p item has fully been converted,
621  *   otherwise another call with this returned memory size should be done.
622  *   On error, a negative errno value is returned and rte_errno is set.
623  */
624 static int
625 mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
626                     const size_t flow_size, struct rte_flow_error *error)
627 {
628         const struct rte_flow_item_ipv6 *spec = item->spec;
629         const struct rte_flow_item_ipv6 *mask = item->mask;
630         const struct rte_flow_item_ipv6 nic_mask = {
631                 .hdr = {
632                         .src_addr =
633                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
634                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
635                         .dst_addr =
636                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
637                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
638                         .vtc_flow = RTE_BE32(0xffffffff),
639                         .proto = 0xff,
640                         .hop_limits = 0xff,
641                 },
642         };
643         unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
644         struct ibv_flow_spec_ipv6 ipv6 = {
645                 .type = IBV_FLOW_SPEC_IPV6,
646                 .size = size,
647         };
648         int ret;
649
650         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
651                 return rte_flow_error_set(error, ENOTSUP,
652                                           RTE_FLOW_ERROR_TYPE_ITEM,
653                                           item,
654                                           "multiple L3 layers not supported");
655         else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
656                 return rte_flow_error_set(error, ENOTSUP,
657                                           RTE_FLOW_ERROR_TYPE_ITEM,
658                                           item,
659                                           "L3 cannot follow an L4 layer");
660         if (!mask)
661                 mask = &rte_flow_item_ipv6_mask;
662         ret = mlx5_flow_item_acceptable
663                 (item, (const uint8_t *)mask,
664                  (const uint8_t *)&nic_mask,
665                  sizeof(struct rte_flow_item_ipv6), error);
666         if (ret < 0)
667                 return ret;
668         flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
669         if (spec) {
670                 unsigned int i;
671                 uint32_t vtc_flow_val;
672                 uint32_t vtc_flow_mask;
673
674                 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
675                        RTE_DIM(ipv6.val.src_ip));
676                 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
677                        RTE_DIM(ipv6.val.dst_ip));
678                 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
679                        RTE_DIM(ipv6.mask.src_ip));
680                 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
681                        RTE_DIM(ipv6.mask.dst_ip));
682                 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
683                 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
684                 ipv6.val.flow_label =
685                         rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
686                                          IPV6_HDR_FL_SHIFT);
687                 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
688                                          IPV6_HDR_TC_SHIFT;
689                 ipv6.val.next_hdr = spec->hdr.proto;
690                 ipv6.val.hop_limit = spec->hdr.hop_limits;
691                 ipv6.mask.flow_label =
692                         rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
693                                          IPV6_HDR_FL_SHIFT);
694                 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
695                                           IPV6_HDR_TC_SHIFT;
696                 ipv6.mask.next_hdr = mask->hdr.proto;
697                 ipv6.mask.hop_limit = mask->hdr.hop_limits;
698                 /* Remove unwanted bits from values. */
699                 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
700                         ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
701                         ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
702                 }
703                 ipv6.val.flow_label &= ipv6.mask.flow_label;
704                 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
705                 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
706                 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
707         }
708         flow->l3_protocol_en = !!ipv6.mask.next_hdr;
709         flow->l3_protocol = ipv6.val.next_hdr;
710         if (size <= flow_size)
711                 mlx5_flow_spec_verbs_add(flow, &ipv6, size);
712         return size;
713 }
714
715 /**
716  * Convert the @p item into a Verbs specification after ensuring the NIC
717  * will understand and process it correctly.
718  * If the necessary size for the conversion is greater than the @p flow_size,
719  * nothing is written in @p flow; the validation is still performed.
720  *
721  * @param[in] item
722  *   Item specification.
723  * @param[in, out] flow
724  *   Pointer to flow structure.
725  * @param[in] flow_size
726  *   Size in bytes of the available space in @p flow, if too small, nothing is
727  *   written.
728  * @param[out] error
729  *   Pointer to error structure.
730  *
731  * @return
732  *   On success the number of bytes consumed/necessary, if the returned value
733  *   is less than or equal to @p flow_size, the @p item has fully been converted,
734  *   otherwise another call with this returned memory size should be done.
735  *   On error, a negative errno value is returned and rte_errno is set.
736  */
737 static int
738 mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
739                    const size_t flow_size, struct rte_flow_error *error)
740 {
741         const struct rte_flow_item_udp *spec = item->spec;
742         const struct rte_flow_item_udp *mask = item->mask;
743         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
744         struct ibv_flow_spec_tcp_udp udp = {
745                 .type = IBV_FLOW_SPEC_UDP,
746                 .size = size,
747         };
748         int ret;
749
750         if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
751                 return rte_flow_error_set(error, ENOTSUP,
752                                           RTE_FLOW_ERROR_TYPE_ITEM,
753                                           item,
754                                           "L3 is mandatory to filter on L4");
755         if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
756                 return rte_flow_error_set(error, ENOTSUP,
757                                           RTE_FLOW_ERROR_TYPE_ITEM,
758                                           item,
759                                           "L4 layer is already present");
760         if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
761                 return rte_flow_error_set(error, ENOTSUP,
762                                           RTE_FLOW_ERROR_TYPE_ITEM,
763                                           item,
764                                           "protocol filtering not compatible"
765                                           " with UDP layer");
766         if (!mask)
767                 mask = &rte_flow_item_udp_mask;
768         ret = mlx5_flow_item_acceptable
769                 (item, (const uint8_t *)mask,
770                  (const uint8_t *)&rte_flow_item_udp_mask,
771                  sizeof(struct rte_flow_item_udp), error);
772         if (ret < 0)
773                 return ret;
774         flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
775         if (size > flow_size)
776                 return size;
777         if (spec) {
778                 udp.val.dst_port = spec->hdr.dst_port;
779                 udp.val.src_port = spec->hdr.src_port;
780                 udp.mask.dst_port = mask->hdr.dst_port;
781                 udp.mask.src_port = mask->hdr.src_port;
782                 /* Remove unwanted bits from values. */
783                 udp.val.src_port &= udp.mask.src_port;
784                 udp.val.dst_port &= udp.mask.dst_port;
785         }
786         mlx5_flow_spec_verbs_add(flow, &udp, size);
787         return size;
788 }
789
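/*
 * Usage sketch (hypothetical values): the converter above lets a pattern
 * such as eth / ipv4 / udp dst is 4789 be expressed, e.g.:
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &udp_spec,
 *		.mask = &udp_mask,
 *	};
 *
 * which mlx5_flow_item_udp() turns into an IBV_FLOW_SPEC_UDP specification
 * with the unwanted bits already masked out.
 */
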
790 /**
791  * Convert the @p pattern into Verbs specifications after ensuring the NIC
792  * will understand and process it correctly.
793  * The conversion is performed item per item, each of them is written into
794  * the @p flow if its size is less than or equal to @p flow_size.
795  * Validation and memory consumption computation are still performed until the
796  * end of @p pattern, unless an error is encountered.
797  *
798  * @param[in] pattern
799  *   Flow pattern.
800  * @param[in, out] flow
801  *   Pointer to the rte_flow structure.
802  * @param[in] flow_size
803  *   Size in bytes of the available space in @p flow, if too small, some
804  *   garbage may be present.
805  * @param[out] error
806  *   Pointer to error structure.
807  *
808  * @return
809  *   On success the number of bytes consumed/necessary, if the returned value
810  *   is less than or equal to @p flow_size, the @p pattern has fully been
811  *   converted, otherwise another call with this returned memory size should
812  *   be done.
813  *   On error, a negative errno value is returned and rte_errno is set.
814  */
815 static int
816 mlx5_flow_items(const struct rte_flow_item pattern[],
817                 struct rte_flow *flow, const size_t flow_size,
818                 struct rte_flow_error *error)
819 {
820         int remain = flow_size;
821         size_t size = 0;
822
823         for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
824                 int ret = 0;
825
826                 switch (pattern->type) {
827                 case RTE_FLOW_ITEM_TYPE_VOID:
828                         break;
829                 case RTE_FLOW_ITEM_TYPE_ETH:
830                         ret = mlx5_flow_item_eth(pattern, flow, remain, error);
831                         break;
832                 case RTE_FLOW_ITEM_TYPE_VLAN:
833                         ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
834                         break;
835                 case RTE_FLOW_ITEM_TYPE_IPV4:
836                         ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
837                         break;
838                 case RTE_FLOW_ITEM_TYPE_IPV6:
839                         ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
840                         break;
841                 case RTE_FLOW_ITEM_TYPE_UDP:
842                         ret = mlx5_flow_item_udp(pattern, flow, remain, error);
843                         break;
844                 default:
845                         return rte_flow_error_set(error, ENOTSUP,
846                                                   RTE_FLOW_ERROR_TYPE_ITEM,
847                                                   pattern,
848                                                   "item not supported");
849                 }
850                 if (ret < 0)
851                         return ret;
852                 if (remain > ret)
853                         remain -= ret;
854                 else
855                         remain = 0;
856                 size += ret;
857         }
858         if (!flow->layers) {
859                 const struct rte_flow_item item = {
860                         .type = RTE_FLOW_ITEM_TYPE_ETH,
861                 };
862
863                 return mlx5_flow_item_eth(&item, flow, flow_size, error);
864         }
865         return size;
866 }
867
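/*
 * Note: an empty or all-VOID pattern falls through to the final block of
 * mlx5_flow_items() and is converted as a single wildcard ETH item, i.e.
 * such a flow matches any outer Ethernet traffic.
 */
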
868 /**
869  * Convert the @p action into a Verbs specification after ensuring the NIC
870  * will understand and process it correctly.
871  * If the necessary size for the conversion is greater than the @p flow_size,
872  * nothing is written in @p flow; the validation is still performed.
873  *
874  * @param[in] action
875  *   Action configuration.
876  * @param[in, out] flow
877  *   Pointer to flow structure.
878  * @param[in] flow_size
879  *   Size in bytes of the available space in @p flow, if too small, nothing is
880  *   written.
881  * @param[out] error
882  *   Pointer to error structure.
883  *
884  * @return
885  *   On success the number of bytes consumed/necessary, if the returned value
886  *   is less than or equal to @p flow_size, the @p action has fully been
887  *   converted, otherwise another call with this returned memory size should
888  *   be done.
889  *   On error, a negative errno value is returned and rte_errno is set.
890  */
891 static int
892 mlx5_flow_action_drop(const struct rte_flow_action *action,
893                       struct rte_flow *flow, const size_t flow_size,
894                       struct rte_flow_error *error)
895 {
896         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
897         struct ibv_flow_spec_action_drop drop = {
898                         .type = IBV_FLOW_SPEC_ACTION_DROP,
899                         .size = size,
900         };
901
902         if (flow->fate)
903                 return rte_flow_error_set(error, ENOTSUP,
904                                           RTE_FLOW_ERROR_TYPE_ACTION,
905                                           action,
906                                           "multiple fate actions are not"
907                                           " supported");
908         if (size <= flow_size)
909                 mlx5_flow_spec_verbs_add(flow, &drop, size);
910         flow->fate |= MLX5_FLOW_FATE_DROP;
911         return size;
912 }
913
914 /**
915  * Convert the @p action into @p flow after ensuring the NIC will understand
916  * and process it correctly.
917  *
918  * @param[in] dev
919  *   Pointer to Ethernet device structure.
920  * @param[in] action
921  *   Action configuration.
922  * @param[in, out] flow
923  *   Pointer to flow structure.
924  * @param[out] error
925  *   Pointer to error structure.
926  *
927  * @return
928  *   0 on success, a negative errno value otherwise and rte_errno is set.
929  */
930 static int
931 mlx5_flow_action_queue(struct rte_eth_dev *dev,
932                        const struct rte_flow_action *action,
933                        struct rte_flow *flow,
934                        struct rte_flow_error *error)
935 {
936         struct priv *priv = dev->data->dev_private;
937         const struct rte_flow_action_queue *queue = action->conf;
938
939         if (flow->fate)
940                 return rte_flow_error_set(error, ENOTSUP,
941                                           RTE_FLOW_ERROR_TYPE_ACTION,
942                                           action,
943                                           "multiple fate actions are not"
944                                           " supported");
945         if (queue->index >= priv->rxqs_n)
946                 return rte_flow_error_set(error, EINVAL,
947                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
948                                           &queue->index,
949                                           "queue index out of range");
950         if (!(*priv->rxqs)[queue->index])
951                 return rte_flow_error_set(error, EINVAL,
952                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
953                                           &queue->index,
954                                           "queue is not configured");
955         flow->queue = queue->index;
956         flow->fate |= MLX5_FLOW_FATE_QUEUE;
957         return 0;
958 }
959
960 /**
961  * Convert the @p actions into @p flow after ensuring the NIC will understand
962  * and process them correctly.
963  * The conversion is performed action per action, each of them is written into
964  * the @p flow if its size is less than or equal to @p flow_size.
965  * Validation and memory consumption computation are still performed until the
966  * end of @p actions, unless an error is encountered.
967  *
968  * @param[in] dev
969  *   Pointer to Ethernet device structure.
970  * @param[in] actions
971  *   Pointer to flow actions array.
972  * @param[in, out] flow
973  *   Pointer to the rte_flow structure.
974  * @param[in] flow_size
975  *   Size in bytes of the available space in @p flow, if too small, some
976  *   garbage may be present.
977  * @param[out] error
978  *   Pointer to error structure.
979  *
980  * @return
981  *   On success the number of bytes consumed/necessary, if the returned value
982  *   is less than or equal to @p flow_size, the @p actions have fully been
983  *   converted, otherwise another call with this returned memory size should
984  *   be done.
985  *   On error, a negative errno value is returned and rte_errno is set.
986  */
987 static int
988 mlx5_flow_actions(struct rte_eth_dev *dev,
989                   const struct rte_flow_action actions[],
990                   struct rte_flow *flow, const size_t flow_size,
991                   struct rte_flow_error *error)
992 {
993         size_t size = 0;
994         int remain = flow_size;
995         int ret = 0;
996
997         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
998                 switch (actions->type) {
999                 case RTE_FLOW_ACTION_TYPE_VOID:
1000                         break;
1001                 case RTE_FLOW_ACTION_TYPE_DROP:
1002                         ret = mlx5_flow_action_drop(actions, flow, remain,
1003                                                     error);
1004                         break;
1005                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1006                         ret = mlx5_flow_action_queue(dev, actions, flow, error);
1007                         break;
1008                 default:
1009                         return rte_flow_error_set(error, ENOTSUP,
1010                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1011                                                   actions,
1012                                                   "action not supported");
1013                 }
1014                 if (ret < 0)
1015                         return ret;
1016                 if (remain > ret)
1017                         remain -= ret;
1018                 else
1019                         remain = 0;
1020                 size += ret;
1021         }
1022         if (!flow->fate)
1023                 return rte_flow_error_set(error, ENOTSUP,
1024                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1025                                           NULL,
1026                                           "no fate action found");
1027         return size;
1028 }
1029
1030 /**
1031  * Convert the @p attributes, @p pattern and @p actions into a flow for the
1032  * NIC after ensuring the NIC will understand and process it correctly.
1033  * The conversion is performed item/action per item/action, each of
1034  * them is written into the @p flow if its size is less than or equal to @p
1035  * flow_size.
1036  * Validation and memory consumption computation are still performed until the
1037  * end, unless an error is encountered.
1038  *
1039  * @param[in] dev
1040  *   Pointer to Ethernet device.
1041  * @param[in, out] flow
1042  *   Pointer to flow structure.
1043  * @param[in] flow_size
1044  *   Size in bytes of the available space in @p flow, if too small, some
1045  *   garbage may be present.
1046  * @param[in] attributes
1047  *   Flow rule attributes.
1048  * @param[in] pattern
1049  *   Pattern specification (list terminated by the END pattern item).
1050  * @param[in] actions
1051  *   Associated actions (list terminated by the END action).
1052  * @param[out] error
1053  *   Perform verbose error reporting if not NULL.
1054  *
1055  * @return
1056  *   On success the number of bytes consumed/necessary, if the returned value
1057  *   is less than or equal to @p flow_size, the flow has fully been converted
1058  *   can be applied, otherwise another call with this returned memory size
1059  *   should be done.
1060  *   On error, a negative errno value is returned and rte_errno is set.
1061  */
1062 static int
1063 mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
1064                 const size_t flow_size,
1065                 const struct rte_flow_attr *attributes,
1066                 const struct rte_flow_item pattern[],
1067                 const struct rte_flow_action actions[],
1068                 struct rte_flow_error *error)
1069 {
1070         struct rte_flow local_flow = { .layers = 0, };
1071         size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
1072         int remain = (flow_size > size) ? flow_size - size : 0;
1073         int ret;
1074
1075         if (!remain)
1076                 flow = &local_flow;
1077         ret = mlx5_flow_attributes(dev, attributes, flow, error);
1078         if (ret < 0)
1079                 return ret;
1080         ret = mlx5_flow_items(pattern, flow, remain, error);
1081         if (ret < 0)
1082                 return ret;
1083         size += ret;
1084         remain = (flow_size > size) ? flow_size - size : 0;
1085         ret = mlx5_flow_actions(dev, actions, flow, remain, error);
1086         if (ret < 0)
1087                 return ret;
1088         size += ret;
1089         if (size <= flow_size)
1090                 flow->verbs.attr->priority = flow->attributes.priority;
1091         return size;
1092 }
1093
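/*
 * Two-pass usage sketch (this is exactly how mlx5_flow_list_create() below
 * drives the conversion):
 *
 *	int size = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
 *
 *	if (size < 0)
 *		return NULL;
 *	flow = rte_zmalloc(__func__, size, 0);
 *	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
 *	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
 *	size = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
 *
 * The first call only validates and measures, the second fills the buffer.
 */
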
1094 /**
1095  * Validate a flow supported by the NIC.
1096  *
1097  * @see rte_flow_validate()
1098  * @see rte_flow_ops
1099  */
1100 int
1101 mlx5_flow_validate(struct rte_eth_dev *dev,
1102                    const struct rte_flow_attr *attr,
1103                    const struct rte_flow_item items[],
1104                    const struct rte_flow_action actions[],
1105                    struct rte_flow_error *error)
1106 {
1107         int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
1108
1109         if (ret < 0)
1110                 return ret;
1111         return 0;
1112 }
1113
1114 /**
1115  * Remove the flow.
1116  *
1117  * @param[in] dev
1118  *   Pointer to Ethernet device.
1119  * @param[in, out] flow
1120  *   Pointer to flow structure.
1121  */
1122 static void
1123 mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1124 {
1125         if (flow->fate & MLX5_FLOW_FATE_DROP) {
1126                 if (flow->verbs.flow) {
1127                         claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
1128                         flow->verbs.flow = NULL;
1129                 }
1130         }
1131         if (flow->verbs.hrxq) {
1132                 if (flow->fate & MLX5_FLOW_FATE_DROP)
1133                         mlx5_hrxq_drop_release(dev);
1134                 else if (flow->fate & MLX5_FLOW_FATE_QUEUE)
1135                         mlx5_hrxq_release(dev, flow->verbs.hrxq);
1136                 flow->verbs.hrxq = NULL;
1137         }
1138 }
1139
1140 /**
1141  * Apply the flow.
1142  *
1143  * @param[in] dev
1144  *   Pointer to Ethernet device structure.
1145  * @param[in, out] flow
1146  *   Pointer to flow structure.
1147  * @param[out] error
1148  *   Pointer to error structure.
1149  *
1150  * @return
1151  *   0 on success, a negative errno value otherwise and rte_errno is set.
1152  */
1153 static int
1154 mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1155                 struct rte_flow_error *error)
1156 {
1157         if (flow->fate & MLX5_FLOW_FATE_DROP) {
1158                 flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
1159                 if (!flow->verbs.hrxq)
1160                         return rte_flow_error_set
1161                                 (error, errno,
1162                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1163                                  NULL,
1164                                  "cannot allocate Drop queue");
1165         } else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {
1166                 struct mlx5_hrxq *hrxq;
1167
1168                 hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
1169                                      rss_hash_default_key_len, 0,
1170                                      &flow->queue, 1, 0, 0);
1171                 if (!hrxq)
1172                         hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
1173                                              rss_hash_default_key_len, 0,
1174                                              &flow->queue, 1, 0, 0);
1175                 if (!hrxq)
1176                         return rte_flow_error_set(error, rte_errno,
1177                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1178                                         NULL,
1179                                         "cannot create hash Rx queue");
1180                 flow->verbs.hrxq = hrxq;
1181         }
1182         flow->verbs.flow =
1183                 mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
1184         if (!flow->verbs.flow) {
1185                 if (flow->fate & MLX5_FLOW_FATE_DROP)
1186                         mlx5_hrxq_drop_release(dev);
1187                 else
1188                         mlx5_hrxq_release(dev, flow->verbs.hrxq);
1189                 flow->verbs.hrxq = NULL;
1190                 return rte_flow_error_set(error, errno,
1191                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1192                                           NULL,
1193                                           "kernel module refuses to create"
1194                                           " flow");
1195         }
1196         return 0;
1197 }
1198
1199 /**
1200  * Create a flow and add it to @p list.
1201  *
1202  * @param dev
1203  *   Pointer to Ethernet device.
1204  * @param list
1205  *   Pointer to a TAILQ flow list.
1206  * @param[in] attr
1207  *   Flow rule attributes.
1208  * @param[in] items
1209  *   Pattern specification (list terminated by the END pattern item).
1210  * @param[in] actions
1211  *   Associated actions (list terminated by the END action).
1212  * @param[out] error
1213  *   Perform verbose error reporting if not NULL.
1214  *
1215  * @return
1216  *   A flow on success, NULL otherwise and rte_errno is set.
1217  */
1218 static struct rte_flow *
1219 mlx5_flow_list_create(struct rte_eth_dev *dev,
1220                       struct mlx5_flows *list,
1221                       const struct rte_flow_attr *attr,
1222                       const struct rte_flow_item items[],
1223                       const struct rte_flow_action actions[],
1224                       struct rte_flow_error *error)
1225 {
1226         struct rte_flow *flow;
1227         size_t size;
1228         int ret;
1229
1230         ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
1231         if (ret < 0)
1232                 return NULL;
1233         size = ret;
1234         flow = rte_zmalloc(__func__, size, 0);
1235         if (!flow) {
1236                 rte_flow_error_set(error, ENOMEM,
1237                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1238                                    NULL,
1239                                    "cannot allocate memory");
1240                 return NULL;
1241         }
1242         flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
1243         flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
1244         ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
1245         if (ret < 0)
1246                 goto error;
1247         assert((size_t)ret == size);
1248         if (dev->data->dev_started) {
1249                 ret = mlx5_flow_apply(dev, flow, error);
1250                 if (ret < 0)
1251                         goto error;
1252         }
1253         TAILQ_INSERT_TAIL(list, flow, next);
1254         return flow;
1255 error:
1256         ret = rte_errno; /* Save rte_errno before cleanup. */
1257         mlx5_flow_remove(dev, flow);
1258         rte_free(flow);
1259         rte_errno = ret; /* Restore rte_errno. */
1260         return NULL;
1261 }
1262
1263 /**
1264  * Create a flow.
1265  *
1266  * @see rte_flow_create()
1267  * @see rte_flow_ops
1268  */
1269 struct rte_flow *
1270 mlx5_flow_create(struct rte_eth_dev *dev,
1271                  const struct rte_flow_attr *attr,
1272                  const struct rte_flow_item items[],
1273                  const struct rte_flow_action actions[],
1274                  struct rte_flow_error *error)
1275 {
1276         return mlx5_flow_list_create
1277                 (dev, &((struct priv *)dev->data->dev_private)->flows,
1278                  attr, items, actions, error);
1279 }
1280
1281 /**
1282  * Destroy a flow in a list.
1283  *
1284  * @param dev
1285  *   Pointer to Ethernet device.
1286  * @param list
1287  *   Pointer to a TAILQ flow list.
1288  * @param[in] flow
1289  *   Flow to destroy.
1290  */
1291 static void
1292 mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
1293                        struct rte_flow *flow)
1294 {
1295         mlx5_flow_remove(dev, flow);
1296         TAILQ_REMOVE(list, flow, next);
1297         rte_free(flow);
1298 }
1299
1300 /**
1301  * Destroy all flows.
1302  *
1303  * @param dev
1304  *   Pointer to Ethernet device.
1305  * @param list
1306  *   Pointer to a TAILQ flow list.
1307  */
1308 void
1309 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
1310 {
1311         while (!TAILQ_EMPTY(list)) {
1312                 struct rte_flow *flow;
1313
1314                 flow = TAILQ_FIRST(list);
1315                 mlx5_flow_list_destroy(dev, list, flow);
1316         }
1317 }
1318
1319 /**
1320  * Remove all flows.
1321  *
1322  * @param dev
1323  *   Pointer to Ethernet device.
1324  * @param list
1325  *   Pointer to a TAILQ flow list.
1326  */
1327 void
1328 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
1329 {
1330         struct rte_flow *flow;
1331
1332         TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
1333                 mlx5_flow_remove(dev, flow);
1334 }
1335
1336 /**
1337  * Add all flows.
1338  *
1339  * @param dev
1340  *   Pointer to Ethernet device.
1341  * @param list
1342  *   Pointer to a TAILQ flow list.
1343  *
1344  * @return
1345  *   0 on success, a negative errno value otherwise and rte_errno is set.
1346  */
1347 int
1348 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
1349 {
1350         struct rte_flow *flow;
1351         struct rte_flow_error error;
1352         int ret = 0;
1353
1354         TAILQ_FOREACH(flow, list, next) {
1355                 ret = mlx5_flow_apply(dev, flow, &error);
1356                 if (ret < 0)
1357                         goto error;
1358         }
1359         return 0;
1360 error:
1361         ret = rte_errno; /* Save rte_errno before cleanup. */
1362         mlx5_flow_stop(dev, list);
1363         rte_errno = ret; /* Restore rte_errno. */
1364         return -rte_errno;
1365 }
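
/*
 * Note: mlx5_flow_stop() and mlx5_flow_start() above are meant to be
 * driven by the device start/stop handlers (mlx5_trigger.c) so that flow
 * rules are removed and re-applied across a port restart.
 */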
1366
1367 /**
1368  * Verify the flow list is empty.
1369  *
1370  * @param dev
1371  *   Pointer to Ethernet device.
1372  *
1373  * @return The number of flows not released.
1374  */
1375 int
1376 mlx5_flow_verify(struct rte_eth_dev *dev)
1377 {
1378         struct priv *priv = dev->data->dev_private;
1379         struct rte_flow *flow;
1380         int ret = 0;
1381
1382         TAILQ_FOREACH(flow, &priv->flows, next) {
1383                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
1384                         dev->data->port_id, (void *)flow);
1385                 ++ret;
1386         }
1387         return ret;
1388 }
1389
1390 /**
1391  * Enable a control flow configured from the control plane.
1392  *
1393  * @param dev
1394  *   Pointer to Ethernet device.
1395  * @param eth_spec
1396  *   An Ethernet flow spec to apply.
1397  * @param eth_mask
1398  *   An Ethernet flow mask to apply.
1399  * @param vlan_spec
1400  *   A VLAN flow spec to apply.
1401  * @param vlan_mask
1402  *   A VLAN flow mask to apply.
1403  *
1404  * @return
1405  *   0 on success, a negative errno value otherwise and rte_errno is set.
1406  */
1407 int
1408 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
1409                     struct rte_flow_item_eth *eth_spec,
1410                     struct rte_flow_item_eth *eth_mask,
1411                     struct rte_flow_item_vlan *vlan_spec,
1412                     struct rte_flow_item_vlan *vlan_mask)
1413 {
1414         struct priv *priv = dev->data->dev_private;
1415         const struct rte_flow_attr attr = {
1416                 .ingress = 1,
1417                 .priority = priv->config.flow_prio - 1,
1418         };
1419         struct rte_flow_item items[] = {
1420                 {
1421                         .type = RTE_FLOW_ITEM_TYPE_ETH,
1422                         .spec = eth_spec,
1423                         .last = NULL,
1424                         .mask = eth_mask,
1425                 },
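                /* With no VLAN spec, the pattern ends after the ETH item. */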
1426                 {
1427                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
1428                                 RTE_FLOW_ITEM_TYPE_END,
1429                         .spec = vlan_spec,
1430                         .last = NULL,
1431                         .mask = vlan_mask,
1432                 },
1433                 {
1434                         .type = RTE_FLOW_ITEM_TYPE_END,
1435                 },
1436         };
1437         uint16_t queue[priv->reta_idx_n];
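        /*
         * Spread matched packets across all configured Rx queues using the
         * current RSS settings.
         */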
1438         struct rte_flow_action_rss action_rss = {
1439                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
1440                 .level = 0,
1441                 .types = priv->rss_conf.rss_hf,
1442                 .key_len = priv->rss_conf.rss_key_len,
1443                 .queue_num = priv->reta_idx_n,
1444                 .key = priv->rss_conf.rss_key,
1445                 .queue = queue,
1446         };
1447         struct rte_flow_action actions[] = {
1448                 {
1449                         .type = RTE_FLOW_ACTION_TYPE_RSS,
1450                         .conf = &action_rss,
1451                 },
1452                 {
1453                         .type = RTE_FLOW_ACTION_TYPE_END,
1454                 },
1455         };
1456         struct rte_flow *flow;
1457         struct rte_flow_error error;
1458         unsigned int i;
1459
1460         if (!priv->reta_idx_n) {
1461                 rte_errno = EINVAL;
1462                 return -rte_errno;
1463         }
1464         for (i = 0; i != priv->reta_idx_n; ++i)
1465                 queue[i] = (*priv->reta_idx)[i];
1466         flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
1467                                      actions, &error);
1468         if (!flow)
1469                 return -rte_errno;
1470         return 0;
1471 }
1472
1473 /**
1474  * Enable a control flow configured from the control plane.
1475  *
1476  * @param dev
1477  *   Pointer to Ethernet device.
1478  * @param eth_spec
1479  *   An Ethernet flow spec to apply.
1480  * @param eth_mask
1481  *   An Ethernet flow mask to apply.
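 *
 * Illustrative sketch (matching all broadcast traffic, the spec doubling
 * as its own mask):
 *
 * @code
 * struct rte_flow_item_eth bcast = {
 *         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 * };
 *
 * mlx5_ctrl_flow(dev, &bcast, &bcast);
 * @endcode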
1482  *
1483  * @return
1484  *   0 on success, a negative errno value otherwise and rte_errno is set.
1485  */
1486 int
1487 mlx5_ctrl_flow(struct rte_eth_dev *dev,
1488                struct rte_flow_item_eth *eth_spec,
1489                struct rte_flow_item_eth *eth_mask)
1490 {
1491         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
1492 }
1493
1494 /**
1495  * Destroy a flow.
1496  *
1497  * @see rte_flow_destroy()
1498  * @see rte_flow_ops
1499  */
1500 int
1501 mlx5_flow_destroy(struct rte_eth_dev *dev,
1502                   struct rte_flow *flow,
1503                   struct rte_flow_error *error __rte_unused)
1504 {
1505         struct priv *priv = dev->data->dev_private;
1506
1507         mlx5_flow_list_destroy(dev, &priv->flows, flow);
1508         return 0;
1509 }
1510
1511 /**
1512  * Destroy all flows.
1513  *
1514  * @see rte_flow_flush()
1515  * @see rte_flow_ops
1516  */
1517 int
1518 mlx5_flow_flush(struct rte_eth_dev *dev,
1519                 struct rte_flow_error *error __rte_unused)
1520 {
1521         struct priv *priv = dev->data->dev_private;
1522
1523         mlx5_flow_list_flush(dev, &priv->flows);
1524         return 0;
1525 }
1526
1527 /**
1528  * Enable or disable isolated mode.
1529  *
1530  * @see rte_flow_isolate()
1531  * @see rte_flow_ops
1532  */
1533 int
1534 mlx5_flow_isolate(struct rte_eth_dev *dev,
1535                   int enable,
1536                   struct rte_flow_error *error)
1537 {
1538         struct priv *priv = dev->data->dev_private;
1539
1540         if (dev->data->dev_started) {
1541                 rte_flow_error_set(error, EBUSY,
1542                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1543                                    NULL,
1544                                    "port must be stopped first");
1545                 return -rte_errno;
1546         }
1547         priv->isolated = !!enable;
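        /*
         * Swap the ops table: the isolate variant restricts callbacks that
         * conflict with isolated mode.
         */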
1548         if (enable)
1549                 dev->dev_ops = &mlx5_dev_ops_isolate;
1550         else
1551                 dev->dev_ops = &mlx5_dev_ops;
1552         return 0;
1553 }
1554
1555 /**
1556  * Convert a flow director filter to a generic flow.
1557  *
1558  * @param dev
1559  *   Pointer to Ethernet device.
1560  * @param fdir_filter
1561  *   Flow director filter to convert.
1562  * @param attributes
1563  *   Generic flow parameters structure.
1564  *
1565  * @return
1566  *   0 on success, a negative errno value otherwise and rte_errno is set.
1567  */
1568 static int
1569 mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
1570                          const struct rte_eth_fdir_filter *fdir_filter,
1571                          struct mlx5_fdir *attributes)
1572 {
1573         struct priv *priv = dev->data->dev_private;
1574         const struct rte_eth_fdir_input *input = &fdir_filter->input;
1575         const struct rte_eth_fdir_masks *mask =
1576                 &dev->data->dev_conf.fdir_conf.mask;
1577
1578         /* Validate queue number. */
1579         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
1580                 DRV_LOG(ERR, "port %u invalid queue number %d",
1581                         dev->data->port_id, fdir_filter->action.rx_queue);
1582                 rte_errno = EINVAL;
1583                 return -rte_errno;
1584         }
1585         attributes->attr.ingress = 1;
1586         attributes->items[0] = (struct rte_flow_item) {
1587                 .type = RTE_FLOW_ITEM_TYPE_ETH,
1588                 .spec = &attributes->l2,
1589                 .mask = &attributes->l2_mask,
1590         };
1591         switch (fdir_filter->action.behavior) {
1592         case RTE_ETH_FDIR_ACCEPT:
1593                 attributes->actions[0] = (struct rte_flow_action){
1594                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
1595                         .conf = &attributes->queue,
1596                 };
1597                 break;
1598         case RTE_ETH_FDIR_REJECT:
1599                 attributes->actions[0] = (struct rte_flow_action){
1600                         .type = RTE_FLOW_ACTION_TYPE_DROP,
1601                 };
1602                 break;
1603         default:
1604                 DRV_LOG(ERR, "port %u invalid behavior %d",
1605                         dev->data->port_id,
1606                         fdir_filter->action.behavior);
1607                 rte_errno = ENOTSUP;
1608                 return -rte_errno;
1609         }
1610         attributes->queue.index = fdir_filter->action.rx_queue;
1611         /* Handle L3. */
1612         switch (fdir_filter->input.flow_type) {
1613         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1614         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1615         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1616                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
1617                         .src_addr = input->flow.ip4_flow.src_ip,
1618                         .dst_addr = input->flow.ip4_flow.dst_ip,
1619                         .time_to_live = input->flow.ip4_flow.ttl,
1620                         .type_of_service = input->flow.ip4_flow.tos,
1621                         .next_proto_id = input->flow.ip4_flow.proto,
1622                 };
1623                 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
1624                         .src_addr = mask->ipv4_mask.src_ip,
1625                         .dst_addr = mask->ipv4_mask.dst_ip,
1626                         .time_to_live = mask->ipv4_mask.ttl,
1627                         .type_of_service = mask->ipv4_mask.tos,
1628                         .next_proto_id = mask->ipv4_mask.proto,
1629                 };
1630                 attributes->items[1] = (struct rte_flow_item){
1631                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
1632                         .spec = &attributes->l3,
1633                         .mask = &attributes->l3_mask,
1634                 };
1635                 break;
1636         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1637         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1638         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1639                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
1640                         .hop_limits = input->flow.ipv6_flow.hop_limits,
1641                         .proto = input->flow.ipv6_flow.proto,
1642                 };
1643
1644                 memcpy(attributes->l3.ipv6.hdr.src_addr,
1645                        input->flow.ipv6_flow.src_ip,
1646                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
1647                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
1648                        input->flow.ipv6_flow.dst_ip,
1649                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
1650                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
1651                        mask->ipv6_mask.src_ip,
1652                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
1653                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
1654                        mask->ipv6_mask.dst_ip,
1655                        RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
1656                 attributes->items[1] = (struct rte_flow_item){
1657                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
1658                         .spec = &attributes->l3,
1659                         .mask = &attributes->l3_mask,
1660                 };
1661                 break;
1662         default:
1663                 DRV_LOG(ERR, "port %u invalid flow type %d",
1664                         dev->data->port_id, fdir_filter->input.flow_type);
1665                 rte_errno = ENOTSUP;
1666                 return -rte_errno;
1667         }
1668         /* Handle L4. */
1669         switch (fdir_filter->input.flow_type) {
1670         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
1671                 attributes->l4.udp.hdr = (struct udp_hdr){
1672                         .src_port = input->flow.udp4_flow.src_port,
1673                         .dst_port = input->flow.udp4_flow.dst_port,
1674                 };
1675                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1676                         .src_port = mask->src_port_mask,
1677                         .dst_port = mask->dst_port_mask,
1678                 };
1679                 attributes->items[2] = (struct rte_flow_item){
1680                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1681                         .spec = &attributes->l4,
1682                         .mask = &attributes->l4_mask,
1683                 };
1684                 break;
1685         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
1686                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1687                         .src_port = input->flow.tcp4_flow.src_port,
1688                         .dst_port = input->flow.tcp4_flow.dst_port,
1689                 };
1690                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1691                         .src_port = mask->src_port_mask,
1692                         .dst_port = mask->dst_port_mask,
1693                 };
1694                 attributes->items[2] = (struct rte_flow_item){
1695                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1696                         .spec = &attributes->l4,
1697                         .mask = &attributes->l4_mask,
1698                 };
1699                 break;
1700         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
1701                 attributes->l4.udp.hdr = (struct udp_hdr){
1702                         .src_port = input->flow.udp6_flow.src_port,
1703                         .dst_port = input->flow.udp6_flow.dst_port,
1704                 };
1705                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
1706                         .src_port = mask->src_port_mask,
1707                         .dst_port = mask->dst_port_mask,
1708                 };
1709                 attributes->items[2] = (struct rte_flow_item){
1710                         .type = RTE_FLOW_ITEM_TYPE_UDP,
1711                         .spec = &attributes->l4,
1712                         .mask = &attributes->l4_mask,
1713                 };
1714                 break;
1715         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
1716                 attributes->l4.tcp.hdr = (struct tcp_hdr){
1717                         .src_port = input->flow.tcp6_flow.src_port,
1718                         .dst_port = input->flow.tcp6_flow.dst_port,
1719                 };
1720                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
1721                         .src_port = mask->src_port_mask,
1722                         .dst_port = mask->dst_port_mask,
1723                 };
1724                 attributes->items[2] = (struct rte_flow_item){
1725                         .type = RTE_FLOW_ITEM_TYPE_TCP,
1726                         .spec = &attributes->l4,
1727                         .mask = &attributes->l4_mask,
1728                 };
1729                 break;
1730         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
1731         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
1732                 break;
1733         default:
1734                 DRV_LOG(ERR, "port %u invalid flow type %d",
1735                         dev->data->port_id, fdir_filter->input.flow_type);
1736                 rte_errno = ENOTSUP;
1737                 return -rte_errno;
1738         }
1739         return 0;
1740 }
1741
1742 /**
1743  * Add new flow director filter and store it in list.
1744  *
1745  * @param dev
1746  *   Pointer to Ethernet device.
1747  * @param fdir_filter
1748  *   Flow director filter to add.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 mlx5_fdir_filter_add(struct rte_eth_dev *dev,
1755                      const struct rte_eth_fdir_filter *fdir_filter)
1756 {
1757         struct priv *priv = dev->data->dev_private;
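        /* All-zero L2 mask: match the Ethernet layer without filtering it. */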
1758         struct mlx5_fdir attributes = {
1759                 .attr.group = 0,
1760                 .l2_mask = {
1761                         .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1762                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
1763                         .type = 0,
1764                 },
1765         };
1766         struct rte_flow_error error;
1767         struct rte_flow *flow;
1768         int ret;
1769
1770         ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
1771         if (ret)
1772                 return ret;
1773         flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
1774                                      attributes.items, attributes.actions,
1775                                      &error);
1776         if (flow) {
1777                 DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
1778                         (void *)flow);
1779                 return 0;
1780         }
1781         return -rte_errno;
1782 }
1783
1784 /**
1785  * Delete a specific filter.
1786  *
1787  * @param dev
1788  *   Pointer to Ethernet device.
1789  * @param fdir_filter
1790  *   Filter to be deleted.
1791  *
1792  * @return
1793  *   0 on success, a negative errno value otherwise and rte_errno is set.
1794  */
1795 static int
1796 mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
1797                         const struct rte_eth_fdir_filter *fdir_filter
1798                         __rte_unused)
1799 {
1800         rte_errno = ENOTSUP;
1801         return -rte_errno;
1802 }
1803
1804 /**
1805  * Update the queue of a specific filter.
1806  *
1807  * @param dev
1808  *   Pointer to Ethernet device.
1809  * @param fdir_filter
1810  *   Filter to be updated.
1811  *
1812  * @return
1813  *   0 on success, a negative errno value otherwise and rte_errno is set.
1814  */
1815 static int
1816 mlx5_fdir_filter_update(struct rte_eth_dev *dev,
1817                         const struct rte_eth_fdir_filter *fdir_filter)
1818 {
1819         int ret;
1820
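        /* Deletion is unsupported (ENOTSUP), so updating currently fails. */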
1821         ret = mlx5_fdir_filter_delete(dev, fdir_filter);
1822         if (ret)
1823                 return ret;
1824         return mlx5_fdir_filter_add(dev, fdir_filter);
1825 }
1826
1827 /**
1828  * Flush all filters.
1829  *
1830  * @param dev
1831  *   Pointer to Ethernet device.
1832  */
1833 static void
1834 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
1835 {
1836         struct priv *priv = dev->data->dev_private;
1837
1838         mlx5_flow_list_flush(dev, &priv->flows);
1839 }
1840
1841 /**
1842  * Get flow director information.
1843  *
1844  * @param dev
1845  *   Pointer to Ethernet device.
1846  * @param[out] fdir_info
1847  *   Resulting flow director information.
1848  */
1849 static void
1850 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
1851 {
1852         struct rte_eth_fdir_masks *mask =
1853                 &dev->data->dev_conf.fdir_conf.mask;
1854
1855         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
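        /* No guaranteed entries nor flexible payload support: report zeros. */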
1856         fdir_info->guarant_spc = 0;
1857         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
1858         fdir_info->max_flexpayload = 0;
1859         fdir_info->flow_types_mask[0] = 0;
1860         fdir_info->flex_payload_unit = 0;
1861         fdir_info->max_flex_payload_segment_num = 0;
1862         fdir_info->flex_payload_limit = 0;
1863         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
1864 }
1865
1866 /**
1867  * Deal with flow director operations.
1868  *
1869  * @param dev
1870  *   Pointer to Ethernet device.
1871  * @param filter_op
1872  *   Operation to perform.
1873  * @param arg
1874  *   Pointer to operation-specific structure.
1875  *
1876  * @return
1877  *   0 on success, a negative errno value otherwise and rte_errno is set.
1878  */
1879 static int
1880 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1881                     void *arg)
1882 {
1883         enum rte_fdir_mode fdir_mode =
1884                 dev->data->dev_conf.fdir_conf.mode;
1885
1886         if (filter_op == RTE_ETH_FILTER_NOP)
1887                 return 0;
1888         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
1889             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1890                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
1891                         dev->data->port_id, fdir_mode);
1892                 rte_errno = EINVAL;
1893                 return -rte_errno;
1894         }
1895         switch (filter_op) {
1896         case RTE_ETH_FILTER_ADD:
1897                 return mlx5_fdir_filter_add(dev, arg);
1898         case RTE_ETH_FILTER_UPDATE:
1899                 return mlx5_fdir_filter_update(dev, arg);
1900         case RTE_ETH_FILTER_DELETE:
1901                 return mlx5_fdir_filter_delete(dev, arg);
1902         case RTE_ETH_FILTER_FLUSH:
1903                 mlx5_fdir_filter_flush(dev);
1904                 break;
1905         case RTE_ETH_FILTER_INFO:
1906                 mlx5_fdir_info_get(dev, arg);
1907                 break;
1908         default:
1909                 DRV_LOG(DEBUG, "port %u unknown operation %u",
1910                         dev->data->port_id, filter_op);
1911                 rte_errno = EINVAL;
1912                 return -rte_errno;
1913         }
1914         return 0;
1915 }
1916
1917 /**
1918  * Manage filter operations.
1919  *
1920  * @param dev
1921  *   Pointer to Ethernet device structure.
1922  * @param filter_type
1923  *   Filter type.
1924  * @param filter_op
1925  *   Operation to perform.
1926  * @param arg
1927  *   Pointer to operation-specific structure.
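 *
 * Illustrative sketch: applications reach this entry point through
 * rte_eth_dev_filter_ctrl(), e.g. to retrieve the generic flow ops:
 *
 * @code
 * const struct rte_flow_ops *ops = NULL;
 *
 * rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                         RTE_ETH_FILTER_GET, &ops);
 * @endcode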
1928  *
1929  * @return
1930  *   0 on success, a negative errno value otherwise and rte_errno is set.
1931  */
1932 int
1933 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
1934                      enum rte_filter_type filter_type,
1935                      enum rte_filter_op filter_op,
1936                      void *arg)
1937 {
1938         switch (filter_type) {
1939         case RTE_ETH_FILTER_GENERIC:
1940                 if (filter_op != RTE_ETH_FILTER_GET) {
1941                         rte_errno = EINVAL;
1942                         return -rte_errno;
1943                 }
1944                 *(const void **)arg = &mlx5_flow_ops;
1945                 return 0;
1946         case RTE_ETH_FILTER_FDIR:
1947                 return mlx5_fdir_ctrl_func(dev, filter_op, arg);
1948         default:
1949                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
1950                         dev->data->port_id, filter_type);
1951                 rte_errno = ENOTSUP;
1952                 return -rte_errno;
1953         }
1954         return 0;
1955 }