/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"

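/* Sanity-check the rte_flow arguments (attribute, pattern and action
 * arrays) before any of them are dereferenced by the parsers below.
 */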
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

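/* Skip over any VOID items and return the first real pattern item. The
 * caller must pass a list terminated by RTE_FLOW_ITEM_TYPE_END.
 */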
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

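/* Skip over any VOID actions and return the first real action. The caller
 * must pass a list terminated by RTE_FLOW_ACTION_TYPE_END.
 */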
static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

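/* Walk the pattern to decide which HW filter type is needed: L3/L4 items
 * require an ntuple filter, while VLAN matching forces an exact-match (EM)
 * filter. Returns 1 for ntuple, 0 for EM, or a negative errno if the
 * pattern mixes VLAN with L3/L4 items.
 */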
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

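/* Translate the pattern items into @filter: each supported item fills the
 * corresponding ntuple/EM filter fields and sets the matching "enables"
 * bits expected by the HWRM filter-allocation requests.
 */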
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        uint32_t en_ethertype;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must not
                         * be partially set: each must be all 0's or all 1's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* A partial ethertype mask is not allowed.
                         * Only exact matches are.
                         */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }

                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* Both fields are big-endian on the wire. */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;

                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded on the VF, so there is no default
                                 * VNIC to mirror to.
                                 */
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr,
                                   "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr,
                                   "No support for group.");
                return -rte_errno;
        }

        return 0;
}

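/* Locate or create the L2 filter that provides the destination-MAC context
 * for an ntuple/EM flow. The port's default L2 filter is reused when the
 * flow's destination MAC matches it; otherwise a new L2 filter is
 * allocated for the requested MAC and programmed via HWRM.
 */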
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = &bp->vnic_info[0];
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has same DST MAC as the port/l2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs DST MAC which is not same as port/l2 */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

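/* Validate the flow against device state and translate the single
 * supported action (queue, drop, count or vf) into HWRM filter flags and
 * a destination VNIC.
 */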
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Only the ingress attribute is supported for now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = &bp->vnic_info[0];
                vnic = &bp->vnic_info[act_q->index];
                if (vnic == NULL) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;

                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* This simply indicates there's no driver loaded on
                         * the VF, so there is no default VNIC to redirect to.
                         */
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        if (filter1) {
                bnxt_free_filter(bp, filter1);
                filter1->fw_l2_filter_id = -1;
        }

        act = bnxt_flow_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

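/* rte_flow validate handler: run the same parse path as flow creation on a
 * scratch filter, then release the filter without keeping any state.
 */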
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

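/* Check whether a filter identical to @nf is already installed. Returns
 * -EEXIST for an exact duplicate (same destination), -EXDEV when only the
 * destination differs (the old HW filter is cleared so the flow can be
 * re-pointed), and 0 when no match exists.
 */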
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /*
                                 * Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 * Reuse the matching L2 filter
                                 * ID for the new filter
                                 */
                                nf->fw_l2_filter_id = mf->fw_l2_filter_id;
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

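/* rte_flow create handler: validate and parse the flow, program the
 * EM/ntuple filter via HWRM, and attach the new flow to the destination
 * VNIC's flow list.
 */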
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }

        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

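/* rte_flow destroy handler: clear the HW filter behind @flow, unlink the
 * flow from its VNIC's list and free it.
 */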
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

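/* rte_flow flush handler: walk every VNIC's flow list, clear the
 * associated HW filters and free all flows.
 */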
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];

                /* Pop flows off the head of the list rather than iterating
                 * with STAILQ_FOREACH, which would dereference each flow
                 * after it has been freed.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set
                                        (error,
                                         -ret,
                                         RTE_FLOW_ERROR_TYPE_HANDLE,
                                         NULL,
                                         "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};