net/mlx4: fix empty Ethernet spec with VLAN
drivers/net/mlx4/mlx4_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }

/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
        /** Bit-mask for fields supported by this PMD. */
        const void *mask_support;
        /** Bit-mask to use when @p item->mask is not provided. */
        const void *mask_default;
        /** Size in bytes for @p mask_support and @p mask_default. */
        const unsigned int mask_sz;
        /** Merge a pattern item into a flow rule handle. */
        int (*merge)(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible subsequent items. */
        const enum rte_flow_item_type *const next_item;
};

/** Shared resources for drop flow rules. */
struct mlx4_drop {
        struct ibv_qp *qp; /**< QP target. */
        struct ibv_cq *cq; /**< CQ associated with above QP. */
        struct mlx4_priv *priv; /**< Back pointer to private data. */
        uint32_t refcnt; /**< Reference count. */
};

/**
 * Convert supported RSS hash field types between DPDK and Verbs formats.
 *
 * This function returns the supported (default) set when @p types has
 * special value 0.
 *
 * @param priv
 *   Pointer to private structure.
 * @param types
 *   Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct
 *   rte_eth_rss_conf) or Verbs format.
 * @param verbs_to_dpdk
 *   A zero value converts @p types from DPDK to Verbs, a nonzero value
 *   performs the reverse operation.
 *
 * @return
 *   Converted RSS hash fields on success, (uint64_t)-1 otherwise and
 *   rte_errno is set.
 */
uint64_t
mlx4_conv_rss_types(struct mlx4_priv *priv, uint64_t types, int verbs_to_dpdk)
{
        enum {
                INNER,
                IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3,
                TCP, UDP,
                IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1,
        };
        enum {
                VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
                VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
                VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
                VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
        };
        static const uint64_t dpdk[] = {
                [INNER] = 0,
                [IPV4] = RTE_ETH_RSS_IPV4,
                [IPV4_1] = RTE_ETH_RSS_FRAG_IPV4,
                [IPV4_2] = RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
                [IPV6] = RTE_ETH_RSS_IPV6,
                [IPV6_1] = RTE_ETH_RSS_FRAG_IPV6,
                [IPV6_2] = RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
                [IPV6_3] = RTE_ETH_RSS_IPV6_EX,
                [TCP] = 0,
                [UDP] = 0,
                [IPV4_TCP] = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
                [IPV4_UDP] = RTE_ETH_RSS_NONFRAG_IPV4_UDP,
                [IPV6_TCP] = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
                [IPV6_TCP_1] = RTE_ETH_RSS_IPV6_TCP_EX,
                [IPV6_UDP] = RTE_ETH_RSS_NONFRAG_IPV6_UDP,
                [IPV6_UDP_1] = RTE_ETH_RSS_IPV6_UDP_EX,
        };
        static const uint64_t verbs[RTE_DIM(dpdk)] = {
                [INNER] = IBV_RX_HASH_INNER,
                [IPV4] = VERBS_IPV4,
                [IPV4_1] = VERBS_IPV4,
                [IPV4_2] = VERBS_IPV4,
                [IPV6] = VERBS_IPV6,
                [IPV6_1] = VERBS_IPV6,
                [IPV6_2] = VERBS_IPV6,
                [IPV6_3] = VERBS_IPV6,
                [TCP] = VERBS_TCP,
                [UDP] = VERBS_UDP,
                [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP,
                [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP,
                [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP,
                [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP,
                [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP,
                [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP,
        };
        const uint64_t *in = verbs_to_dpdk ? verbs : dpdk;
        const uint64_t *out = verbs_to_dpdk ? dpdk : verbs;
        uint64_t seen = 0;
        uint64_t conv = 0;
        unsigned int i;

        if (!types) {
                if (!verbs_to_dpdk)
                        return priv->hw_rss_sup;
                types = priv->hw_rss_sup;
        }
        for (i = 0; i != RTE_DIM(dpdk); ++i)
                if (in[i] && (types & in[i]) == in[i]) {
                        seen |= types & in[i];
                        conv |= out[i];
                }
        if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) &&
            !(types & ~seen))
                return conv;
        rte_errno = ENOTSUP;
        return (uint64_t)-1;
}

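/*
 * Illustrative sketch (not part of the driver): converting DPDK RSS hash
 * types to their Verbs equivalent before creating a hash QP, assuming a
 * valid "priv" whose hardware supports IPv4/TCP hashing.
 *
 *   uint64_t fields;
 *
 *   rte_errno = 0;
 *   fields = mlx4_conv_rss_types(priv, RTE_ETH_RSS_NONFRAG_IPV4_TCP, 0);
 *   if (fields == (uint64_t)-1 && rte_errno)
 *           return -rte_errno; // requested types exceed hardware support
 *   // "fields" now holds IBV_RX_HASH_{SRC,DST}_IPV4 |
 *   // IBV_RX_HASH_{SRC,DST}_PORT_TCP, ready for Verbs.
 */
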
/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_eth *eth;
        const char *msg;
        unsigned int i;

        if (mask) {
                uint32_t sum_dst = 0;
                uint32_t sum_src = 0;

                for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
                        sum_dst += mask->dst.addr_bytes[i];
                        sum_src += mask->src.addr_bytes[i];
                }
                if (sum_src) {
                        msg = "mlx4 does not support source MAC matching";
                        goto error;
                } else if (!sum_dst) {
                        flow->promisc = 1;
                } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
                        if (!(spec->dst.addr_bytes[0] & 1)) {
                                msg = "mlx4 does not support the explicit"
                                        " exclusion of all multicast traffic";
                                goto error;
                        }
                        flow->allmulti = 1;
                } else if (sum_dst != (UINT8_C(0xff) * RTE_ETHER_ADDR_LEN)) {
                        msg = "mlx4 does not support matching partial"
                                " Ethernet fields";
                        goto error;
                }
        }
        if (!flow->ibv_attr)
                return 0;
        if (flow->promisc) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
                return 0;
        }
        if (flow->allmulti) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
                return 0;
        }
        ++flow->ibv_attr->num_of_specs;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *eth = (struct ibv_flow_spec_eth) {
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*eth),
        };
        if (!mask) {
                eth->val.dst_mac[0] = 0xff;
                flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
                flow->promisc = 1;
                return 0;
        }
        memcpy(eth->val.dst_mac, spec->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
        memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
        /* Remove unwanted bits from values. */
        for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
                eth->val.dst_mac[i] &= eth->mask.dst_mac[i];

        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

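/*
 * Illustrative sketch (not part of the driver): an Ethernet item this
 * function accepts, matching one unicast destination MAC exactly. The
 * address below is a made-up example.
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .dst.addr_bytes = "\x00\x0a\x0b\x0c\x0d\x0e",
 *   };
 *   const struct rte_flow_item_eth eth_mask = {
 *           .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *   };
 *   // A source MAC mask, or any partial destination mask other than
 *   // 01:00:00:00:00:00 (all multicast), would be rejected above.
 */
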
/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_eth *eth;
        const char *msg;

        if (!mask || !mask->tci) {
                msg = "mlx4 cannot match all VLAN traffic while excluding"
                        " non-VLAN traffic, TCI VID must be specified";
                goto error;
        }
        if (mask->tci != RTE_BE16(0x0fff)) {
                msg = "mlx4 does not support partial TCI VID matching";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
                       sizeof(*eth));
        eth->val.vlan_tag = spec->tci;
        eth->mask.vlan_tag = mask->tci;
        eth->val.vlan_tag &= eth->mask.vlan_tag;
        if (flow->ibv_attr->type == IBV_FLOW_ATTR_ALL_DEFAULT)
                flow->ibv_attr->type = IBV_FLOW_ATTR_NORMAL;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

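/*
 * Illustrative sketch (not part of the driver): a VLAN item this function
 * accepts. The VLAN match piggybacks on the Ethernet spec written by
 * mlx4_flow_merge_eth() (dst_sz is 0 for VLAN in the item graph), and the
 * final check above reverts the ALL_DEFAULT type set for an empty Ethernet
 * spec so that the VLAN match is not silently discarded, which is the fix
 * this commit carries.
 *
 *   struct rte_flow_item_vlan vlan_spec = {
 *           .tci = RTE_BE16(42),        // VID 42, example value
 *   };
 *   const struct rte_flow_item_vlan vlan_mask = {
 *           .tci = RTE_BE16(0x0fff),    // full VID, no PCP/DEI bits
 *   };
 */
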
/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_ipv4 *ipv4;
        const char *msg;

        if (mask &&
            ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
             (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
                msg = "mlx4 does not support matching partial IPv4 fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *ipv4 = (struct ibv_flow_spec_ipv4) {
                .type = IBV_FLOW_SPEC_IPV4,
                .size = sizeof(*ipv4),
        };
        if (!spec)
                return 0;
        ipv4->val = (struct ibv_flow_ipv4_filter) {
                .src_ip = spec->hdr.src_addr,
                .dst_ip = spec->hdr.dst_addr,
        };
        ipv4->mask = (struct ibv_flow_ipv4_filter) {
                .src_ip = mask->hdr.src_addr,
                .dst_ip = mask->hdr.dst_addr,
        };
        /* Remove unwanted bits from values. */
        ipv4->val.src_ip &= ipv4->mask.src_ip;
        ipv4->val.dst_ip &= ipv4->mask.dst_ip;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

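/*
 * Note on the partial-mask test above (the UDP and TCP handlers below use
 * the same idiom on 16-bit ports): "(uint32_t)(mask + 1) > 1" relies on
 * unsigned wrap-around and is true for every value except 0 (field
 * ignored) and all-ones (exact match), i.e. it flags any partial mask:
 *
 *   mask == 0x00000000  ->  mask + 1 == 1  ->  not > 1  ->  accepted
 *   mask == 0xffffffff  ->  mask + 1 == 0  ->  not > 1  ->  accepted
 *   anything else       ->  mask + 1  > 1  ->  rejected
 *
 * All-zeros and all-ones are the same in any byte order, so the test works
 * directly on the big-endian header fields.
 */
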
/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 * - Due to HW/FW limitation, flow rule priority is not taken into account
 *   when matching UDP destination ports, doing so is therefore only
 *   supported at the highest priority level (0).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_tcp_udp *udp;
        const char *msg;

        if (mask &&
            ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
             (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
                msg = "mlx4 does not support matching partial UDP fields";
                goto error;
        }
        if (mask && mask->hdr.dst_port && flow->priority) {
                msg = "combining UDP destination port matching with a nonzero"
                        " priority level is not supported";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *udp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_UDP,
                .size = sizeof(*udp),
        };
        if (!spec)
                return 0;
        udp->val.dst_port = spec->hdr.dst_port;
        udp->val.src_port = spec->hdr.src_port;
        udp->mask.dst_port = mask->hdr.dst_port;
        udp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        udp->val.src_port &= udp->mask.src_port;
        udp->val.dst_port &= udp->mask.dst_port;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_tcp_udp *tcp;
        const char *msg;

        if (mask &&
            ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
             (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
                msg = "mlx4 does not support matching partial TCP fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *tcp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_TCP,
                .size = sizeof(*tcp),
        };
        if (!spec)
                return 0;
        tcp->val.dst_port = spec->hdr.dst_port;
        tcp->val.src_port = spec->hdr.src_port;
        tcp->mask.dst_port = mask->hdr.dst_port;
        tcp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        tcp->val.src_port &= tcp->mask.src_port;
        tcp->val.dst_port &= tcp->mask.dst_port;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const uint8_t *mask;
        unsigned int i;

        /* item->last and item->mask cannot exist without item->spec. */
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "\"mask\" or \"last\" field provided without a"
                         " corresponding \"spec\"");
        /* No spec, no mask, no problem. */
        if (!item->spec)
                return 0;
        mask = item->mask ?
                (const uint8_t *)item->mask :
                (const uint8_t *)proc->mask_default;
        MLX4_ASSERT(mask);
        /*
         * Single-pass check to make sure that:
         * - Mask is supported, no bits are set outside proc->mask_support.
         * - Both item->spec and item->last are included in mask.
         */
        for (i = 0; i != proc->mask_sz; ++i) {
                if (!mask[i])
                        continue;
                if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
                    ((const uint8_t *)proc->mask_support)[i])
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 item, "unsupported field found in \"mask\"");
                if (item->last &&
                    (((const uint8_t *)item->spec)[i] & mask[i]) !=
                    (((const uint8_t *)item->last)[i] & mask[i]))
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 item,
                                 "range between \"spec\" and \"last\""
                                 " is larger than \"mask\"");
        }
        return 0;
}

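/*
 * Illustrative sketch (not part of the driver): how the byte-wise check
 * above catches an unsupported field. With the Ethernet item, whose
 * mask_support below covers only the destination MAC, a mask that also
 * sets src.addr_bytes fails the "(mask[i] | support[i]) != support[i]"
 * test at the first offending byte:
 *
 *   support: ff ff ff ff ff ff 00 00 00 00 00 00 ...
 *   mask:    ff ff ff ff ff ff ff 00 00 00 00 00 ...
 *                               ^^ 0xff | 0x00 != 0x00 -> ENOTSUP
 */
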
/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
                                       RTE_FLOW_ITEM_TYPE_IPV4),
                .mask_support = &(const struct rte_flow_item_eth){
                        /* Only destination MAC can be matched. */
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                },
                .mask_default = &rte_flow_item_eth_mask,
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .merge = mlx4_flow_merge_eth,
                .dst_sz = sizeof(struct ibv_flow_spec_eth),
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
                .mask_support = &(const struct rte_flow_item_vlan){
                        /* Only TCI VID matching is supported. */
                        .tci = RTE_BE16(0x0fff),
                },
                .mask_default = &rte_flow_item_vlan_mask,
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .merge = mlx4_flow_merge_vlan,
                .dst_sz = 0,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
                                       RTE_FLOW_ITEM_TYPE_TCP),
                .mask_support = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = RTE_BE32(0xffffffff),
                                .dst_addr = RTE_BE32(0xffffffff),
                        },
                },
                .mask_default = &rte_flow_item_ipv4_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .merge = mlx4_flow_merge_ipv4,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .mask_support = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .mask_default = &rte_flow_item_udp_mask,
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .merge = mlx4_flow_merge_udp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .mask_support = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .mask_default = &rte_flow_item_tcp_mask,
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .merge = mlx4_flow_merge_tcp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
};

/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct mlx4_priv *priv,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct rte_flow **addr)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *action;
        const struct mlx4_flow_proc_item *proc;
        struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
        struct rte_flow *flow = &temp;
        const char *msg = NULL;
        int overlap;

        if (attr->group)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                         NULL, "groups are not supported");
        if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                         NULL, "maximum priority level is "
                         MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
        if (attr->egress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                         NULL, "egress is not supported");
        if (attr->transfer)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                         NULL, "transfer is not supported");
        if (!attr->ingress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                         NULL, "only ingress is supported");
fill:
        overlap = 0;
        proc = mlx4_flow_proc_item_list;
        flow->priority = attr->priority;
        /* Go over pattern. */
        for (item = pattern; item->type; ++item) {
                const struct mlx4_flow_proc_item *next = NULL;
                unsigned int i;
                int err;

                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
                        flow->internal = 1;
                        continue;
                }
                if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN && flow->promisc) ||
                    flow->allmulti) {
                        msg = "mlx4 does not support additional matching"
                                " criteria combined with indiscriminate"
                                " matching on Ethernet headers";
                        goto exit_item_not_supported;
                }
                for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
                        if (proc->next_item[i] == item->type) {
                                next = &mlx4_flow_proc_item_list[item->type];
                                break;
                        }
                }
                if (!next)
                        goto exit_item_not_supported;
                proc = next;
                /*
                 * Perform basic sanity checks only once, while handle is
                 * not allocated.
                 */
                if (flow == &temp) {
                        err = mlx4_flow_item_check(item, proc, error);
                        if (err)
                                return err;
                }
                if (proc->merge) {
                        err = proc->merge(flow, item, proc, error);
                        if (err)
                                return err;
                }
                flow->ibv_attr_size += proc->dst_sz;
        }
        /* Go over actions list. */
        for (action = actions; action->type; ++action) {
                /* This one may appear anywhere multiple times. */
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                /* Fate-deciding actions may appear exactly once. */
                if (overlap) {
                        msg = "cannot combine several fate-deciding actions,"
                                " choose between DROP, QUEUE or RSS";
                        goto exit_action_not_supported;
                }
                overlap = 1;
                switch (action->type) {
                        const struct rte_flow_action_queue *queue;
                        const struct rte_flow_action_rss *rss;
                        const uint8_t *rss_key;
                        uint32_t rss_key_len;
                        uint64_t fields;
                        unsigned int i;

                case RTE_FLOW_ACTION_TYPE_DROP:
                        flow->drop = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        if (flow->rss)
                                break;
                        queue = action->conf;
                        if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
                                msg = "queue target index beyond number of"
                                        " configured Rx queues";
                                goto exit_action_not_supported;
                        }
                        flow->rss = mlx4_rss_get
                                (priv, 0, mlx4_rss_hash_key_default, 1,
                                 &queue->index);
                        if (!flow->rss) {
                                msg = "not enough resources for additional"
                                        " single-queue RSS context";
                                goto exit_action_not_supported;
                        }
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        if (flow->rss)
                                break;
                        rss = action->conf;
                        /* Default RSS configuration if none is provided. */
                        if (rss->key_len) {
                                rss_key = rss->key ?
                                          rss->key : mlx4_rss_hash_key_default;
                                rss_key_len = rss->key_len;
                        } else {
                                rss_key = mlx4_rss_hash_key_default;
                                rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
                        }
                        /* Sanity checks. */
                        for (i = 0; i < rss->queue_num; ++i)
                                if (rss->queue[i] >=
                                    ETH_DEV(priv)->data->nb_rx_queues)
                                        break;
                        if (i != rss->queue_num) {
                                msg = "queue target index beyond number of"
                                        " configured Rx queues";
                                goto exit_action_not_supported;
                        }
                        if (!rte_is_power_of_2(rss->queue_num)) {
                                msg = "for RSS, mlx4 requires the number of"
                                        " queues to be a power of two";
                                goto exit_action_not_supported;
                        }
                        if (rss_key_len != sizeof(flow->rss->key)) {
                                msg = "mlx4 supports exactly one RSS hash key"
                                        " length: "
                                        MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
                                goto exit_action_not_supported;
                        }
                        for (i = 1; i < rss->queue_num; ++i)
                                if (rss->queue[i] - rss->queue[i - 1] != 1)
                                        break;
                        if (i != rss->queue_num) {
                                msg = "mlx4 requires RSS contexts to use"
                                        " consecutive queue indices only";
                                goto exit_action_not_supported;
                        }
                        if (rss->queue[0] % rss->queue_num) {
                                msg = "mlx4 requires the first queue of an RSS"
                                        " context to be aligned on a multiple"
                                        " of the context size";
                                goto exit_action_not_supported;
                        }
                        if (rss->func &&
                            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
                                msg = "the only supported RSS hash function"
                                        " is Toeplitz";
                                goto exit_action_not_supported;
                        }
                        if (rss->level) {
                                msg = "a nonzero RSS encapsulation level is"
                                        " not supported";
                                goto exit_action_not_supported;
                        }
                        rte_errno = 0;
                        fields = mlx4_conv_rss_types(priv, rss->types, 0);
                        if (fields == (uint64_t)-1 && rte_errno) {
                                msg = "unsupported RSS hash type requested";
                                goto exit_action_not_supported;
                        }
                        flow->rss = mlx4_rss_get
                                (priv, fields, rss_key, rss->queue_num,
                                 rss->queue);
                        if (!flow->rss) {
                                msg = "either invalid parameters or not enough"
                                        " resources for additional multi-queue"
                                        " RSS context";
                                goto exit_action_not_supported;
                        }
                        break;
                default:
                        goto exit_action_not_supported;
                }
        }
        /* When fate is unknown, drop traffic. */
        if (!overlap)
                flow->drop = 1;
        /* Validation ends here. */
        if (!addr) {
                if (flow->rss)
                        mlx4_rss_put(flow->rss);
                return 0;
        }
        if (flow == &temp) {
                /* Allocate proper handle based on collected data. */
                const struct mlx4_malloc_vec vec[] = {
                        {
                                .align = alignof(struct rte_flow),
                                .size = sizeof(*flow),
                                .addr = (void **)&flow,
                        },
                        {
                                .align = alignof(struct ibv_flow_attr),
                                .size = temp.ibv_attr_size,
                                .addr = (void **)&temp.ibv_attr,
                        },
                };

                if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
                        if (temp.rss)
                                mlx4_rss_put(temp.rss);
                        return rte_flow_error_set
                                (error, -rte_errno,
                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                 "flow rule handle allocation failure");
                }
                /* Most fields will be updated by second pass. */
                *flow = (struct rte_flow){
                        .ibv_attr = temp.ibv_attr,
                        .ibv_attr_size = sizeof(*flow->ibv_attr),
                        .rss = temp.rss,
                };
                *flow->ibv_attr = (struct ibv_flow_attr){
                        .type = IBV_FLOW_ATTR_NORMAL,
                        .size = sizeof(*flow->ibv_attr),
                        .priority = attr->priority,
                        .port = priv->port,
                };
                goto fill;
        }
        *addr = flow;
        return 0;
exit_item_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg ? msg : "item not supported");
exit_action_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                  action, msg ? msg : "action not supported");
}

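/*
 * Illustrative sketch (not part of the driver): a pattern and action list
 * that mlx4_flow_prepare() accepts, matching TCP traffic to one MAC and
 * IPv4 destination (both example values) and steering it to queue 0,
 * assuming the port has at least one configured Rx queue. Passing
 * addr == NULL validates without allocating a handle, which is how
 * mlx4_flow_validate() uses it; the two-pass "goto fill" design first
 * sizes ibv_attr on the stack copy, then replays the merge callbacks into
 * the allocated handle.
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .dst.addr_bytes = "\x00\x0a\x0b\x0c\x0d\x0e",
 *   };
 *   const struct rte_flow_item_eth eth_mask = {
 *           .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *   };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *   };
 *   const struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.dst_addr = RTE_BE32(0xffffffff),
 *   };
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   const struct rte_flow_action_queue queue_conf = { .index = 0 };
 *   const struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */
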
/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct mlx4_priv *priv = dev->data->dev_private;

        return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}

/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct mlx4_priv *priv)
{
        struct mlx4_drop *drop = priv->drop;

        if (drop) {
                MLX4_ASSERT(drop->refcnt);
                MLX4_ASSERT(drop->priv == priv);
                ++drop->refcnt;
                return drop;
        }
        drop = rte_malloc(__func__, sizeof(*drop), 0);
        if (!drop)
                goto error;
        *drop = (struct mlx4_drop){
                .priv = priv,
                .refcnt = 1,
        };
        drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!drop->cq)
                goto error;
        drop->qp = mlx4_glue->create_qp
                (priv->pd,
                 &(struct ibv_qp_init_attr){
                        .send_cq = drop->cq,
                        .recv_cq = drop->cq,
                        .qp_type = IBV_QPT_RAW_PACKET,
                 });
        if (!drop->qp)
                goto error;
        priv->drop = drop;
        return drop;
error:
        if (drop) {
                if (drop->qp)
                        claim_zero(mlx4_glue->destroy_qp(drop->qp));
                if (drop->cq)
                        claim_zero(mlx4_glue->destroy_cq(drop->cq));
                rte_free(drop);
        }
        rte_errno = ENOMEM;
        return NULL;
}

/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
        MLX4_ASSERT(drop->refcnt);
        if (--drop->refcnt)
                return;
        drop->priv->drop = NULL;
        claim_zero(mlx4_glue->destroy_qp(drop->qp));
        claim_zero(mlx4_glue->destroy_cq(drop->cq));
        rte_free(drop);
}

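/*
 * Illustrative sketch (not part of the driver): drop resources follow a
 * simple get/put reference-counting protocol. All drop rules on a port
 * share one dummy RAW_PACKET QP that never posts receive buffers, so
 * traffic steered to it is discarded; the QP and its CQ are released when
 * the last reference is given back.
 *
 *   struct mlx4_drop *drop = mlx4_drop_get(priv); // refcnt 0 -> 1
 *   if (!drop)
 *           return -rte_errno;                    // ENOMEM
 *   // ... attach Verbs flow rules to drop->qp ...
 *   mlx4_drop_put(drop);                          // refcnt 1 -> 0, freed
 */
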
/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct mlx4_priv *priv,
                 struct rte_flow *flow,
                 int enable,
                 struct rte_flow_error *error)
{
        struct ibv_qp *qp = NULL;
        const char *msg;
        int err;

        if (!enable) {
                if (!flow->ibv_flow)
                        return 0;
                claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
                flow->ibv_flow = NULL;
                if (flow->drop)
                        mlx4_drop_put(priv->drop);
                else if (flow->rss)
                        mlx4_rss_detach(flow->rss);
                return 0;
        }
        MLX4_ASSERT(flow->ibv_attr);
        if (!flow->internal &&
            !priv->isolated &&
            flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
                if (flow->ibv_flow) {
                        claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
                        flow->ibv_flow = NULL;
                        if (flow->drop)
                                mlx4_drop_put(priv->drop);
                        else if (flow->rss)
                                mlx4_rss_detach(flow->rss);
                }
                err = EACCES;
                msg = ("priority level "
                       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
                       " is reserved when not in isolated mode");
                goto error;
        }
        if (flow->rss) {
                struct mlx4_rss *rss = flow->rss;
                int missing = 0;
                unsigned int i;

                /* Stop at the first nonexistent target queue. */
                for (i = 0; i != rss->queues; ++i)
                        if (rss->queue_id[i] >=
                            ETH_DEV(priv)->data->nb_rx_queues ||
                            !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
                                missing = 1;
                                break;
                        }
                if (flow->ibv_flow) {
                        if (missing ^ !flow->drop)
                                return 0;
                        /* Verbs flow needs updating. */
                        claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
                        flow->ibv_flow = NULL;
                        if (flow->drop)
                                mlx4_drop_put(priv->drop);
                        else
                                mlx4_rss_detach(rss);
                }
                if (!missing) {
                        err = mlx4_rss_attach(rss);
                        if (err) {
                                err = -err;
                                msg = "cannot create indirection table or hash"
                                        " QP to associate flow rule with";
                                goto error;
                        }
                        qp = rss->qp;
                }
                /* A missing target queue drops traffic implicitly. */
                flow->drop = missing;
        }
        if (flow->drop) {
                if (flow->ibv_flow)
                        return 0;
                mlx4_drop_get(priv);
                if (!priv->drop) {
                        err = rte_errno;
                        msg = "resources for drop flow rule cannot be created";
                        goto error;
                }
                qp = priv->drop->qp;
        }
        MLX4_ASSERT(qp);
        if (flow->ibv_flow)
                return 0;
        flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
        if (flow->ibv_flow)
                return 0;
        if (flow->drop)
                mlx4_drop_put(priv->drop);
        else if (flow->rss)
                mlx4_rss_detach(flow->rss);
        err = errno;
        msg = "flow rule rejected by device";
error:
        return rte_flow_error_set
                (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct mlx4_priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
        int err;

        err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
        if (err)
                return NULL;
        err = mlx4_flow_toggle(priv, flow, priv->started, error);
        if (!err) {
                struct rte_flow *curr = LIST_FIRST(&priv->flows);

                /* New rules are inserted after internal ones. */
                if (!curr || !curr->internal) {
                        LIST_INSERT_HEAD(&priv->flows, flow, next);
                } else {
                        while (LIST_NEXT(curr, next) &&
                               LIST_NEXT(curr, next)->internal)
                                curr = LIST_NEXT(curr, next);
                        LIST_INSERT_AFTER(curr, flow, next);
                }
                return flow;
        }
        if (flow->rss)
                mlx4_rss_put(flow->rss);
        rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                           error->message);
        rte_free(flow);
        return NULL;
}

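/*
 * Illustrative sketch (not part of the driver): creating a rule through
 * the public rte_flow API, which lands in mlx4_flow_create() via
 * rte_flow_ops. "pattern" and "actions" are as in the earlier sketch;
 * "port_id" is assumed to refer to a valid, started mlx4 port.
 *
 *   struct rte_flow_error flow_err;
 *   const struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow *handle;
 *
 *   handle = rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
 *   if (!handle)
 *           printf("flow rejected: %s\n",
 *                  flow_err.message ? flow_err.message : "unknown");
 */
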
/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
{
        struct mlx4_priv *priv = dev->data->dev_private;

        if (!!enable == !!priv->isolated)
                return 0;
        priv->isolated = !!enable;
        if (mlx4_flow_sync(priv, error)) {
                priv->isolated = !enable;
                return -rte_errno;
        }
        return 0;
}

/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct mlx4_priv *priv = dev->data->dev_private;
        int err = mlx4_flow_toggle(priv, flow, 0, error);

        if (err)
                return err;
        LIST_REMOVE(flow, next);
        if (flow->rss)
                mlx4_rss_put(flow->rss);
        rte_free(flow);
        return 0;
}

/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct mlx4_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = LIST_FIRST(&priv->flows);

        while (flow) {
                struct rte_flow *next = LIST_NEXT(flow, next);

                if (!flow->internal)
                        mlx4_flow_destroy(dev, flow, error);
                flow = next;
        }
        return 0;
}

/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
{
        while (vlan < 4096) {
                if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] &
                    (UINT64_C(1) << (vlan % 64)))
                        return vlan;
                ++vlan;
        }
        return vlan;
}

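/*
 * Illustrative sketch (not part of the driver): iterating all configured
 * VLAN filters with the helper above. vlan_filter_conf.ids[] is a 4096-bit
 * bitmap stored as 64-bit words, hence the "/ 64" word index and "% 64"
 * bit index.
 *
 *   uint16_t vlan;
 *
 *   for (vlan = mlx4_flow_internal_next_vlan(priv, 0);
 *        vlan < 4096;
 *        vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1))
 *           printf("VLAN %u is filtered\n", vlan);
 */
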
1270 /**
1271  * Generate internal flow rules.
1272  *
1273  * Various flow rules are created depending on the mode the device is in:
1274  *
1275  * 1. Promiscuous:
1276  *       port MAC + broadcast + catch-all (VLAN filtering is ignored).
1277  * 2. All multicast:
1278  *       port MAC/VLAN + broadcast + catch-all multicast.
1279  * 3. Otherwise:
1280  *       port MAC/VLAN + broadcast MAC/VLAN.
1281  *
1282  * About MAC flow rules:
1283  *
1284  * - MAC flow rules are generated from @p dev->data->mac_addrs
1285  *   (@p priv->mac array).
1286  * - An additional flow rule for Ethernet broadcasts is also generated.
1287  * - All these are per-VLAN if @p RTE_ETH_RX_OFFLOAD_VLAN_FILTER
1288  *   is enabled and VLAN filters are configured.
1289  *
1290  * @param priv
1291  *   Pointer to private structure.
1292  * @param[out] error
1293  *   Perform verbose error reporting if not NULL.
1294  *
1295  * @return
1296  *   0 on success, a negative errno value otherwise and rte_errno is set.
1297  */
1298 static int
1299 mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
1300 {
1301         struct rte_flow_attr attr = {
1302                 .priority = MLX4_FLOW_PRIORITY_LAST,
1303                 .ingress = 1,
1304         };
1305         struct rte_flow_item_eth eth_spec;
1306         const struct rte_flow_item_eth eth_mask = {
1307                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1308         };
1309         const struct rte_flow_item_eth eth_allmulti = {
1310                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
1311         };
1312         struct rte_flow_item_vlan vlan_spec;
1313         const struct rte_flow_item_vlan vlan_mask = {
1314                 .tci = RTE_BE16(0x0fff),
1315         };
1316         struct rte_flow_item pattern[] = {
1317                 {
1318                         .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
1319                 },
1320                 {
1321                         .type = RTE_FLOW_ITEM_TYPE_ETH,
1322                         .spec = &eth_spec,
1323                         .mask = &eth_mask,
1324                 },
1325                 {
1326                         /* Replaced with VLAN if filtering is enabled. */
1327                         .type = RTE_FLOW_ITEM_TYPE_END,
1328                 },
1329                 {
1330                         .type = RTE_FLOW_ITEM_TYPE_END,
1331                 },
1332         };
1333         /*
1334          * Round number of queues down to their previous power of 2 to
1335          * comply with RSS context limitations. Extra queues silently do not
1336          * get RSS by default.
	 */
	uint32_t queues =
		rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1;
	uint16_t queue[queues];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = 0,
		.key_len = MLX4_RSS_HASH_KEY_SIZE,
		.queue_num = queues,
		.key = mlx4_rss_hash_key_default,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_ether_addr *rule_mac = &eth_spec.dst;
	rte_be16_t *rule_vlan =
		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
		!ETH_DEV(priv)->data->promiscuous ?
		&vlan_spec.tci :
		NULL;
	uint16_t vlan = 0;
	struct rte_flow *flow;
	unsigned int i;
	int err = 0;

	/* Nothing to be done if there are no Rx queues. */
	if (!queues)
		goto error;
	/* Prepare default RSS configuration. */
	for (i = 0; i != queues; ++i)
		queue[i] = i;
	/*
	 * Set up VLAN item if filtering is enabled and at least one VLAN
	 * filter is configured.
	 */
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, 0);
		if (vlan < 4096) {
			pattern[2] = (struct rte_flow_item){
				.type = RTE_FLOW_ITEM_TYPE_VLAN,
				.spec = &vlan_spec,
				.mask = &vlan_mask,
			};
next_vlan:
			*rule_vlan = rte_cpu_to_be_16(vlan);
		} else {
			rule_vlan = NULL;
		}
	}
	for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
		const struct rte_ether_addr *mac;

		/* Broadcasts are handled by an extra iteration. */
		if (i < RTE_DIM(priv->mac))
			mac = &priv->mac[i];
		else
			mac = &eth_mask.dst;
		if (rte_is_zero_ether_addr(mac))
			continue;
		/* Check if MAC flow rule is already present. */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			const struct ibv_flow_spec_eth *eth =
				(const void *)((uintptr_t)flow->ibv_attr +
					       sizeof(*flow->ibv_attr));
			unsigned int j;

			if (!flow->mac)
				continue;
			MLX4_ASSERT(flow->ibv_attr->type ==
				    IBV_FLOW_ATTR_NORMAL);
			MLX4_ASSERT(flow->ibv_attr->num_of_specs == 1);
			MLX4_ASSERT(eth->type == IBV_FLOW_SPEC_ETH);
			MLX4_ASSERT(flow->rss);
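			/*
			 * A matching rule must carry the exact VLAN being
			 * configured (when filtering is enabled) or no VLAN
			 * at all (when it is not).
			 */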
			if (rule_vlan &&
			    (eth->val.vlan_tag != *rule_vlan ||
			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
				continue;
			if (!rule_vlan && eth->mask.vlan_tag)
				continue;
			for (j = 0; j != sizeof(mac->addr_bytes); ++j)
				if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
				    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
				    eth->val.src_mac[j] != UINT8_C(0x00) ||
				    eth->mask.src_mac[j] != UINT8_C(0x00))
					break;
			if (j != sizeof(mac->addr_bytes))
				continue;
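			/* Its RSS configuration must target the same queues. */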
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				continue;
			break;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			memcpy(rule_mac, mac, sizeof(*mac));
			flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		flow->select = 1;
		flow->mac = 1;
	}
	if (rule_vlan) {
		vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
		if (vlan < 4096)
			goto next_vlan;
	}
	/* Take care of promiscuous and all-multicast flow rules. */
	if (ETH_DEV(priv)->data->promiscuous ||
	    ETH_DEV(priv)->data->all_multicast) {
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_NEXT(flow, next)) {
			if (ETH_DEV(priv)->data->promiscuous) {
				if (flow->promisc)
					break;
			} else {
				MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast);
				if (flow->allmulti)
					break;
			}
		}
		if (flow && flow->internal) {
			MLX4_ASSERT(flow->rss);
			if (flow->rss->queues != queues ||
			    memcmp(flow->rss->queue_id, action_rss.queue,
				   queues * sizeof(flow->rss->queue_id[0])))
				flow = NULL;
		}
		if (!flow || !flow->internal) {
			/* Not found, create a new flow rule. */
			if (ETH_DEV(priv)->data->promiscuous) {
				pattern[1].spec = NULL;
				pattern[1].mask = NULL;
			} else {
				MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast);
				pattern[1].spec = &eth_allmulti;
				pattern[1].mask = &eth_allmulti;
			}
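			/*
			 * Drop the VLAN item if any: promiscuous and
			 * allmulti rules are not per-VLAN.
			 */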
			pattern[2] = pattern[3];
			flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
						actions, error);
			if (!flow) {
				err = -rte_errno;
				goto error;
			}
		}
		MLX4_ASSERT(flow->promisc || flow->allmulti);
		flow->select = 1;
	}
error:
	/* Clear selection and clean up stale internal flow rules. */
	flow = LIST_FIRST(&priv->flows);
	while (flow && flow->internal) {
		struct rte_flow *next = LIST_NEXT(flow, next);

		if (!flow->select)
			claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
						     error));
		else
			flow->select = 0;
		flow = next;
	}
	return err;
}

/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
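 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 * @code
 * struct rte_flow_error error = { .message = NULL };
 *
 * if (mlx4_flow_sync(priv, &error))
 *         ERROR("cannot sync flow rules: %s",
 *               error.message ? error.message : strerror(rte_errno));
 * @endcode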
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int ret;

	/* Internal flow rules are guaranteed to come first in the list. */
	if (priv->isolated) {
		/*
		 * Get rid of them in isolated mode; stop at the first
		 * non-internal rule found.
		 */
		for (flow = LIST_FIRST(&priv->flows);
		     flow && flow->internal;
		     flow = LIST_FIRST(&priv->flows))
			claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
						     error));
	} else {
		/* Refresh internal rules. */
		ret = mlx4_flow_internal(priv, error);
		if (ret)
			return ret;
	}
	/* Toggle the remaining flow rules. */
	LIST_FOREACH(flow, &priv->flows, next) {
		ret = mlx4_flow_toggle(priv, flow, priv->started, error);
		if (ret)
			return ret;
	}
	if (!priv->started)
		MLX4_ASSERT(!priv->drop);
	return 0;
}

/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
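 *
 * A minimal usage sketch (typically invoked from the device close path;
 * hypothetical caller, not part of this file):
 *
 * @code
 * mlx4_flow_clean(priv);
 * MLX4_ASSERT(LIST_EMPTY(&priv->flows));
 * @endcode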
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct mlx4_priv *priv)
{
	struct rte_flow *flow;

	while ((flow = LIST_FIRST(&priv->flows)))
		mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
	MLX4_ASSERT(LIST_EMPTY(&priv->rss));
}

static const struct rte_flow_ops mlx4_flow_ops = {
	.validate = mlx4_flow_validate,
	.create = mlx4_flow_create,
	.destroy = mlx4_flow_destroy,
	.flush = mlx4_flow_flush,
	.isolate = mlx4_flow_isolate,
};

/**
 * Get rte_flow callbacks.
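 *
 * A sketch of how the ethdev layer is expected to retrieve and dispatch
 * these callbacks (hypothetical caller; @p attr, @p pattern, @p actions
 * and @p err are assumed to be built elsewhere):
 *
 * @code
 * const struct rte_flow_ops *ops = NULL;
 * int ret = mlx4_flow_ops_get(dev, &ops);
 *
 * if (!ret)
 *         ret = ops->validate(dev, &attr, pattern, actions, &err);
 * @endcode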
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param ops
 *   Storage for the returned flow operations structure pointer.
 *
 * @return 0
 */
int
mlx4_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_ops **ops)
{
	*ops = &mlx4_flow_ops;
	return 0;
}