net/bnxt: fix NAT template
drivers/net/bnxt/bnxt_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

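/* Validate that the application passed non-NULL attribute, pattern and
 * action arrays before any parsing is attempted.
 */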
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

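/* Skip VOID entries so that parsing always starts at the next meaningful
 * item or action in the application-supplied arrays.
 */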
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

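/* Walk the pattern once to pick the HWRM filter type: returns 1 when an
 * ntuple filter is needed (L3/L4 items present), 0 when an L2/exact-match
 * filter suffices, or -rte_errno for invalid combinations such as VLAN
 * matching together with ntuple.
 */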
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;
        bool has_vlan = 0;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        has_vlan = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
                        use_ntuple |= 0;
                }
                item++;
        }

        if (has_vlan && use_ntuple) {
                PMD_DRV_LOG(ERR,
                            "VLAN flow cannot use NTUPLE filter\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Cannot use VLAN with NTUPLE");
                return -rte_errno;
        }

        return use_ntuple;
}

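/* Translate the rte_flow pattern into a bnxt_filter_info, filling either
 * ntuple or exact/L2 match fields depending on the filter type selected
 * by bnxt_filter_type_check().  Ranges (item->last) are not supported.
 * For example (illustrative testpmd syntax):
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 1.2.3.4 /
 *        udp dst is 4789 / end actions queue index 1 / end
 *
 * is parsed here into an ntuple filter on the IPv4 destination address
 * and UDP destination port.
 */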
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_ether_addr *dst, *src;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0, valid_flags = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t en_ethertype;
        uint8_t inner = 0;
        uint32_t vf = 0;
        uint32_t en = 0;
        int use_ntuple;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        if (use_ntuple < 0)
                return use_ntuple;
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                        inner =
                        ((const struct rte_flow_item_any *)item->spec)->num > 3;
                        if (inner)
                                PMD_DRV_LOG(DEBUG, "Parse inner header\n");
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        if (!item->spec || !item->mask)
                                break;

                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source MAC address mask cannot be partially set.
                         * Should be All 0's or all 1's.
                         * Destination MAC address mask must not be partially
                         * set. Should be all 1's or all 0's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                dst = &eth_spec->dst;
                                if (!rte_is_valid_assigned_ether_addr(dst)) {
                                        rte_flow_error_set(error,
                                                           EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "DMAC is invalid");
                                        PMD_DRV_LOG(ERR,
                                                    "DMAC is invalid!\n");
                                        return -rte_errno;
                                }
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
                                        BNXT_FLOW_L2_DST_VALID_FLAG;
                                filter->priority = attr->priority;
                                PMD_DRV_LOG(DEBUG,
                                            "Creating a priority flow\n");
                        }
                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                src = &eth_spec->src;
                                if (!rte_is_valid_assigned_ether_addr(src)) {
                                        rte_flow_error_set(error,
                                                           EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "SMAC is invalid");
                                        PMD_DRV_LOG(ERR,
                                                    "SMAC is invalid!\n");
                                        return -rte_errno;
                                }
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
                                        BNXT_FLOW_L2_SRC_VALID_FLAG;
                        } /*
                           * else {
                           *  PMD_DRV_LOG(ERR, "Handle this condition\n");
                           * }
                           */
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }
                        if (inner)
                                valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (!vxlan_spec && !vxlan_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                                break;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (!nvgre_spec && !nvgre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                                break;
                        }

                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = (const struct rte_flow_item_gre *)item->spec;
                        gre_mask = (const struct rte_flow_item_gre *)item->mask;

                        /*
                         * Check if GRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if (!!gre_spec ^ !!gre_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GRE item");
                                return -rte_errno;
                        }

                        if (!gre_spec && !gre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
                                break;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id =
                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded. This is not an error.
                                 */
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;
        filter->valid_flags = valid_flags;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        return 0;
}

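/* Look for an existing L2 filter this flow can reuse: first the port's
 * default L2 filter (same destination MAC), then every filter on every
 * valid VNIC with identical MAC, VLAN and ethertype match fields.
 * Returns NULL when a new L2 filter must be created.
 */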
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf, *f0;
        struct bnxt_vnic_info *vnic0;
        int i;

        vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        for (i = bp->max_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                if (vnic->fw_vnic_id == INVALID_VNIC_ID)
                        continue;

                STAILQ_FOREACH(mf, &vnic->filter, next) {
                        if (mf->matching_l2_fltr_ptr)
                                continue;

                        if (mf->ethertype == nf->ethertype &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    RTE_ETHER_ADDR_LEN))
                                return mf;
                }
        }
        return NULL;
}

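/* Allocate and program a new L2 filter for a flow that matches no
 * existing L2 filter.  The filter inherits the flow's MAC match (SRC or
 * DST), priority hint and drop semantics, and is installed in hardware
 * via bnxt_hwrm_set_l2_filter().
 */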
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                      struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1;
        int rc;

        /* Alloc new L2 filter.
         * This flow needs a MAC filter which does not match any existing
         * L2 filter.
         */
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        memcpy(filter1, nf, sizeof(*filter1));

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
        filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
            nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
        }

        if (nf->filter_type == HWRM_CFA_L2_FILTER &&
            (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
                memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
        } else {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
                memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        }

        if (nf->priority &&
            (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
                /* Tell the FW where to place the filter in the table. */
                if (nf->priority > 65535) {
                        filter1->pri_hint =
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
                        /* This will place the filter in TCAM */
                        filter1->l2_filter_id_hint = (uint64_t)-1;
                }
        }

        if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
                               BNXT_FLOW_L2_SRC_VALID_FLAG |
                               BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
                               BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
                filter1->enables =
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
                memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        }

        if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
                if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
                        /* Num VLANs for the drop filter will/should be 0.
                         * If the req is memset to 0, then the count will
                         * be automatically set to 0.
                         */
                        if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
                                filter1->enables |=
                                        L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
                        } else {
                                filter1->enables |=
                                        L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
                                filter1->flags |=
                                HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                        }
                }
        }

        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

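/* Return the L2 filter a flow should reference: reuse a matching filter,
 * bumping its reference count, or create a new one on the given VNIC.
 * The result is cached in nf->matching_l2_fltr_ptr.
 */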
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *l2_filter = NULL;

        l2_filter = bnxt_find_matching_l2_filter(bp, nf);
        if (l2_filter) {
                l2_filter->l2_ref_cnt++;
        } else {
                l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
                if (l2_filter) {
                        STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
                        l2_filter->vnic = vnic;
                }
        }
        nf->matching_l2_fltr_ptr = l2_filter;

        return l2_filter;
}

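/* Prepare a VNIC for use by a flow: allocate the ring group and the VNIC
 * in firmware, allocate an RSS context when more than one Rx ring is
 * involved, apply the VLAN-strip setting and configure the VNIC.
 */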
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto ret;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
                goto ret;
        }
        bp->nr_vnics++;

        /* RSS context is required only when there is more than one RSS ring */
        if (vnic->rx_queue_cnt > 1) {
                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic ctx alloc failure: %x\n", rc);
                        goto ret;
                }
        } else {
                PMD_DRV_LOG(DEBUG, "No RSS context required\n");
        }

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto ret;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

ret:
        return rc;
}

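/* Check whether an existing VNIC's RSS configuration matches the queue
 * set requested by an RSS action: the queue counts must be equal and
 * every requested ring group must be present in the VNIC's group table.
 */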
static int match_vnic_rss_cfg(struct bnxt *bp,
                              struct bnxt_vnic_info *vnic,
                              const struct rte_flow_action_rss *rss)
{
        unsigned int match = 0, i;

        if (vnic->rx_queue_cnt != rss->queue_num)
                return -EINVAL;

        for (i = 0; i < rss->queue_num; i++) {
                if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
                    !bp->rx_queues[rss->queue[i]]->rx_started)
                        return -EINVAL;
        }

        for (i = 0; i < vnic->rx_queue_cnt; i++) {
                int j;

                for (j = 0; j < vnic->rx_queue_cnt; j++) {
                        if (bp->grp_info[rss->queue[i]].fw_grp_id ==
                            vnic->fw_grp_ids[j])
                                match++;
                }
        }

        if (match != vnic->rx_queue_cnt) {
                PMD_DRV_LOG(ERR,
                            "VNIC queue count %d vs queues matched %d\n",
                            vnic->rx_queue_cnt, match);
                return -EINVAL;
        }

        return 0;
}

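/* Propagate L2 filter attributes to the flow's filter.  A flow with only
 * L2 match criteria becomes an L2 filter itself; every flow inherits the
 * firmware L2 filter id, reference count and flow id from filter1.
 */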
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
                            struct bnxt_filter_info *filter1,
                            int use_ntuple)
{
        if (!use_ntuple &&
            !(filter->valid_flags &
              ~(BNXT_FLOW_L2_DST_VALID_FLAG |
                BNXT_FLOW_L2_SRC_VALID_FLAG |
                BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
                BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
                BNXT_FLOW_L2_DROP_FLAG |
                BNXT_FLOW_PARSE_INNER_FLAG))) {
                filter->flags = filter1->flags;
                filter->enables = filter1->enables;
                filter->filter_type = HWRM_CFA_L2_FILTER;
                memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
                memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
                filter->pri_hint = filter1->pri_hint;
                filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
        }
        filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
        filter->l2_ref_cnt = filter1->l2_ref_cnt;
        filter->flow_id = filter1->flow_id;
        PMD_DRV_LOG(DEBUG,
                "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
                filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}

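/* Parse the action list and finish building the filter begun by
 * bnxt_validate_and_parse_flow_type().  Each supported action (QUEUE,
 * DROP, COUNT, VF, ...) selects a destination VNIC and the L2 filter the
 * flow hangs off.  For example (illustrative testpmd syntax):
 *
 *   flow create 0 ingress pattern ... / end actions queue index 2 / end
 *
 * redirects matching traffic to the VNIC backing Rx queue 2.
 */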
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_filter_info *filter1 = NULL;
        const struct rte_flow_action_rss *rss;
        struct bnxt_rx_queue *rxq = NULL;
        int dflt_vnic, vnic_id;
        unsigned int rss_idx;
        uint32_t vf = 0, i;
        int rc, use_ntuple;

        rc =
        bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Since we support ingress attribute only - right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        use_ntuple = bnxt_filter_type_check(pattern, error);

start:
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic_id = attr->group;
                if (!vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Group id is 0\n");
                        vnic_id = act_q->index;
                }

                BNXT_VALID_VNIC_OR_RET(bp, vnic_id);

                vnic = &bp->vnic_info[vnic_id];
                if (vnic->rx_queue_cnt) {
                        if (vnic->start_grp_id != act_q->index) {
                                PMD_DRV_LOG(ERR,
                                            "VNIC already in use\n");
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   act,
                                                   "VNIC already in use");
                                rc = -rte_errno;
                                goto ret;
                        }
                        goto use_vnic;
                }

                rxq = bp->rx_queues[act_q->index];

                if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
                    vnic->fw_vnic_id != INVALID_HW_RING_ID)
                        goto use_vnic;

                if (!rxq) {
                        PMD_DRV_LOG(ERR,
                                    "Queue invalid or used with other VNIC\n");
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Queue invalid or in use");
1097                         rc = -rte_errno;
1098                         goto ret;
1099                 }
1100
1101                 rxq->vnic = vnic;
1102                 rxq->rx_started = 1;
1103                 vnic->rx_queue_cnt++;
1104                 vnic->start_grp_id = act_q->index;
1105                 vnic->end_grp_id = act_q->index;
1106                 vnic->func_default = 0; //This is not a default VNIC.
1107
1108                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
1109
1110                 rc = bnxt_vnic_prep(bp, vnic);
1111                 if (rc)  {
1112                         rte_flow_error_set(error,
1113                                            EINVAL,
1114                                            RTE_FLOW_ERROR_TYPE_ACTION,
1115                                            act,
1116                                            "VNIC prep fail");
1117                         rc = -rte_errno;
1118                         goto ret;
1119                 }
1120
1121                 PMD_DRV_LOG(DEBUG,
1122                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1123                             act_q->index, vnic, vnic->fw_grp_ids);
1124
1125 use_vnic:
1126                 vnic->ff_pool_idx = vnic_id;
1127                 PMD_DRV_LOG(DEBUG,
1128                             "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
1129                 filter->dst_id = vnic->fw_vnic_id;
1130
1131                 /* For ntuple filter, create the L2 filter with default VNIC.
1132                  * The user specified redirect queue will be set while creating
1133                  * the ntuple filter in hardware.
1134                  */
1135                 vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
1136                 if (use_ntuple)
1137                         filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1138                 else
1139                         filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1140                 if (filter1 == NULL) {
1141                         rte_flow_error_set(error,
1142                                            ENOSPC,
1143                                            RTE_FLOW_ERROR_TYPE_ACTION,
1144                                            act,
1145                                            "Filter not available");
1146                         rc = -rte_errno;
1147                         goto ret;
1148                 }
1149
1150                 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
1151                             filter, filter1, filter1->l2_ref_cnt);
1152                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1153                 break;
1154         case RTE_FLOW_ACTION_TYPE_DROP:
1155                 vnic0 = &bp->vnic_info[0];
1156                 filter->dst_id = vnic0->fw_vnic_id;
1157                 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
1158                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1159                 if (filter1 == NULL) {
1160                         rte_flow_error_set(error,
1161                                            ENOSPC,
1162                                            RTE_FLOW_ERROR_TYPE_ACTION,
1163                                            act,
1164                                            "Filter not available");
1165                         rc = -rte_errno;
1166                         goto ret;
1167                 }
1168
1169                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1170                         filter->flags =
1171                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1172                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1173                         filter->flags =
1174                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1175
1176                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1177                 break;
1178         case RTE_FLOW_ACTION_TYPE_COUNT:
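                /* A count action is bound to the default VNIC's L2
                 * filter; the METER flag is what makes the firmware
                 * account for traffic hitting this flow.
                 */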
1179                 vnic0 = &bp->vnic_info[0];
1180                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1181                 if (filter1 == NULL) {
1182                         rte_flow_error_set(error,
1183                                            ENOSPC,
1184                                            RTE_FLOW_ERROR_TYPE_ACTION,
1185                                            act,
1186                                            "New filter not available");
1187                         rc = -rte_errno;
1188                         goto ret;
1189                 }
1190
1191                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1192                 filter->flow_id = filter1->flow_id;
1193                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1194                 break;
1195         case RTE_FLOW_ACTION_TYPE_VF:
1196                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1197                 vf = act_vf->id;
1198
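                /* Tunnel flows (VXLAN/IPGRE) redirected to a VF/PF are
                 * programmed as HWRM tunnel-redirect filters rather
                 * than as ntuple rules.
                 */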
1199                 if (filter->tunnel_type ==
1200                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1201                     filter->tunnel_type ==
1202                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
                        /* If issued on a VF, ensure the VF is trusted
                         * and the id is 0.
                         */
1204                         if (BNXT_VF(bp)) {
1205                                 if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1206                                         rte_flow_error_set(error, EINVAL,
1207                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1208                                                 act,
1209                                                 "Incorrect VF");
1210                                         rc = -rte_errno;
1211                                         goto ret;
1212                                 }
1213                         }
1214
1215                         filter->enables |= filter->tunnel_type;
1216                         filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1217                         goto done;
1218                 }
1219
1220                 if (vf >= bp->pdev->max_vfs) {
1221                         rte_flow_error_set(error,
1222                                            EINVAL,
1223                                            RTE_FLOW_ERROR_TYPE_ACTION,
1224                                            act,
1225                                            "Incorrect VF id!");
1226                         rc = -rte_errno;
1227                         goto ret;
1228                 }
1229
                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
1232                 if (dflt_vnic < 0) {
                        /* A negative value means no driver is loaded
                         * on the VF, so there is no default VNIC to
                         * mirror to; reject the flow request.
                         */
1236                         rte_flow_error_set(error,
1237                                            EINVAL,
1238                                            RTE_FLOW_ERROR_TYPE_ACTION,
1239                                            act,
1240                                            "Unable to get default VNIC for VF");
1241                         rc = -rte_errno;
1242                         goto ret;
1243                 }
1244
1245                 filter->mirror_vnic_id = dflt_vnic;
1246                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1247
1248                 vnic0 = &bp->vnic_info[0];
1249                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1250                 if (filter1 == NULL) {
1251                         rte_flow_error_set(error,
1252                                            ENOSPC,
1253                                            RTE_FLOW_ERROR_TYPE_ACTION,
1254                                            act,
1255                                            "New filter not available");
1256                         rc = -rte_errno;
1257                         goto ret;
1258                 }
1259
1260                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1261                 filter->flow_id = filter1->flow_id;
1262                 break;
1263         case RTE_FLOW_ACTION_TYPE_RSS:
1264                 rss = (const struct rte_flow_action_rss *)act->conf;
1265
1266                 vnic_id = attr->group;
1267
1268                 BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1269                 vnic = &bp->vnic_info[vnic_id];
1270
1271                 /* Check if requested RSS config matches RSS config of VNIC
1272                  * only if it is not a fresh VNIC configuration.
1273                  * Otherwise the existing VNIC configuration can be used.
1274                  */
1275                 if (vnic->rx_queue_cnt) {
1276                         rc = match_vnic_rss_cfg(bp, vnic, rss);
1277                         if (rc) {
1278                                 PMD_DRV_LOG(ERR,
1279                                             "VNIC and RSS config mismatch\n");
1280                                 rte_flow_error_set(error,
1281                                                    EINVAL,
1282                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1283                                                    act,
1284                                                    "VNIC and RSS cfg mismatch");
1285                                 rc = -rte_errno;
1286                                 goto ret;
1287                         }
1288                         goto vnic_found;
1289                 }
1290
1291                 for (i = 0; i < rss->queue_num; i++) {
1292                         PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1293                                     rss->queue[i]);
1294
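                        /* Reject queue 0 along with out-of-range or
                         * unconfigured queues: queue 0 stays with the
                         * default VNIC and cannot join an RSS action.
                         */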
1295                         if (!rss->queue[i] ||
1296                             rss->queue[i] >= bp->rx_nr_rings ||
1297                             !bp->rx_queues[rss->queue[i]]) {
1298                                 rte_flow_error_set(error,
1299                                                    EINVAL,
1300                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1301                                                    act,
1302                                                    "Invalid queue ID for RSS");
1303                                 rc = -rte_errno;
1304                                 goto ret;
1305                         }
1306                         rxq = bp->rx_queues[rss->queue[i]];
1307
1308                         if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1309                             INVALID_HW_RING_ID) {
1310                                 PMD_DRV_LOG(ERR,
1311                                             "queue active with other VNIC\n");
1312                                 rte_flow_error_set(error,
1313                                                    EINVAL,
1314                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1315                                                    act,
1316                                                    "Invalid queue ID for RSS");
1317                                 rc = -rte_errno;
1318                                 goto ret;
1319                         }
1320
1321                         rxq->vnic = vnic;
1322                         rxq->rx_started = 1;
1323                         vnic->rx_queue_cnt++;
1324                 }
1325
1326                 vnic->start_grp_id = rss->queue[0];
1327                 vnic->end_grp_id = rss->queue[rss->queue_num - 1];
                vnic->func_default = 0; /* Not a default VNIC. */
1329
1330                 rc = bnxt_vnic_prep(bp, vnic);
1331                 if (rc) {
1332                         rte_flow_error_set(error,
1333                                            EINVAL,
1334                                            RTE_FLOW_ERROR_TYPE_ACTION,
1335                                            act,
1336                                            "VNIC prep fail");
1337                         rc = -rte_errno;
1338                         goto ret;
1339                 }
1340
1341                 PMD_DRV_LOG(DEBUG,
1342                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1343                             vnic_id, vnic, vnic->fw_grp_ids);
1344
1345                 vnic->ff_pool_idx = vnic_id;
1346                 PMD_DRV_LOG(DEBUG,
1347                             "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1348
1349                 /* This can be done only after vnic_grp_alloc is done. */
1350                 for (i = 0; i < vnic->rx_queue_cnt; i++) {
1351                         vnic->fw_grp_ids[i] =
1352                                 bp->grp_info[rss->queue[i]].fw_grp_id;
1353                         /* Make sure vnic0 does not use these rings. */
1354                         bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1355                                 INVALID_HW_RING_ID;
1356                 }
1357
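                /* Populate the RSS indirection table by cycling
                 * through the VNIC's ring group IDs until all
                 * HW_HASH_INDEX_SIZE slots are filled.
                 */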
1358                 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
1359                         for (i = 0; i < vnic->rx_queue_cnt; i++)
1360                                 vnic->rss_table[rss_idx++] =
1361                                         vnic->fw_grp_ids[i];
1362                 }
1363
1364                 /* Configure RSS only if the queue count is > 1 */
1365                 if (vnic->rx_queue_cnt > 1) {
1366                         vnic->hash_type =
1367                                 bnxt_rte_to_hwrm_hash_types(rss->types);
1368
1369                         if (!rss->key_len) {
1370                                 /* If hash key has not been specified,
1371                                  * use random hash key.
1372                                  */
1373                                 prandom_bytes(vnic->rss_hash_key,
1374                                               HW_HASH_KEY_SIZE);
                        } else {
                                /* Truncate keys longer than the
                                 * hardware supports.
                                 */
                                memcpy(vnic->rss_hash_key, rss->key,
                                       RTE_MIN(rss->key_len,
                                               (uint32_t)HW_HASH_KEY_SIZE));
                        }
1385                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1386                 } else {
1387                         PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1388                 }
1389
1390 vnic_found:
1391                 filter->dst_id = vnic->fw_vnic_id;
1392                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1393                 if (filter1 == NULL) {
1394                         rte_flow_error_set(error,
1395                                            ENOSPC,
1396                                            RTE_FLOW_ERROR_TYPE_ACTION,
1397                                            act,
1398                                            "New filter not available");
1399                         rc = -rte_errno;
1400                         goto ret;
1401                 }
1402
1403                 PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1404                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1405                 break;
1406         case RTE_FLOW_ACTION_TYPE_MARK:
1407                 if (bp->mark_table == NULL) {
1408                         rte_flow_error_set(error,
1409                                            ENOMEM,
1410                                            RTE_FLOW_ERROR_TYPE_ACTION,
1411                                            act,
1412                                            "Mark table not allocated.");
1413                         rc = -rte_errno;
1414                         goto ret;
1415                 }
1416
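                /* The vector Rx path does not return the mark in the
                 * mbuf, so switch to the scalar receive routine once a
                 * mark action is installed.
                 */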
1417                 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
1418                         PMD_DRV_LOG(DEBUG,
1419                                     "Disabling vector processing for mark\n");
1420                         bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts;
1421                         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1422                 }
1423
1424                 filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
1425                 filter->mark = ((const struct rte_flow_action_mark *)
1426                                 act->conf)->id;
                PMD_DRV_LOG(DEBUG, "Mark action: mark id %u\n", filter->mark);
1428                 break;
1429         default:
1430                 rte_flow_error_set(error,
1431                                    EINVAL,
1432                                    RTE_FLOW_ERROR_TYPE_ACTION,
1433                                    act,
1434                                    "Invalid action.");
1435                 rc = -rte_errno;
1436                 goto ret;
1437         }
1438
done:
        /* Advance to the next non-void action and loop back until the
         * action list is exhausted.
         */
        act = bnxt_flow_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END)
                goto start;
1443
1444         return rc;
ret:
1447         if (filter1) {
1448                 bnxt_hwrm_clear_l2_filter(bp, filter1);
1449                 bnxt_free_filter(bp, filter1);
1450         }
1451
        if (rte_errno) {
1453                 if (vnic && STAILQ_EMPTY(&vnic->filter))
1454                         vnic->rx_queue_cnt = 0;
1455
1456                 if (rxq && !vnic->rx_queue_cnt)
1457                         rxq->vnic = &bp->vnic_info[0];
1458         }
1459         return -rte_errno;
1460 }
1461
1462 static
1463 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
1464                                           struct bnxt_filter_info *filter)
1465 {
1466         struct bnxt_vnic_info *vnic = NULL;
1467         unsigned int i;
1468
1469         for (i = 0; i < bp->max_vnics; i++) {
1470                 vnic = &bp->vnic_info[i];
1471                 if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1472                     filter->dst_id == vnic->fw_vnic_id) {
1473                         PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1474                                     vnic->ff_pool_idx);
1475                         return vnic;
1476                 }
1477         }
1478         return NULL;
1479 }
1480
1481 static int
1482 bnxt_flow_validate(struct rte_eth_dev *dev,
1483                    const struct rte_flow_attr *attr,
1484                    const struct rte_flow_item pattern[],
1485                    const struct rte_flow_action actions[],
1486                    struct rte_flow_error *error)
1487 {
1488         struct bnxt *bp = dev->data->dev_private;
1489         struct bnxt_vnic_info *vnic = NULL;
1490         struct bnxt_filter_info *filter;
1491         int ret = 0;
1492
1493         bnxt_acquire_flow_lock(bp);
1494         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1495         if (ret != 0) {
1496                 bnxt_release_flow_lock(bp);
1497                 return ret;
1498         }
1499
1500         filter = bnxt_get_unused_filter(bp);
1501         if (filter == NULL) {
1502                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1503                 bnxt_release_flow_lock(bp);
1504                 return -ENOMEM;
1505         }
1506
1507         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1508                                            error, filter);
1509         if (ret)
1510                 goto exit;
1511
1512         vnic = find_matching_vnic(bp, filter);
        if (vnic && STAILQ_EMPTY(&vnic->filter)) {
                rte_free(vnic->fw_grp_ids);
                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
                vnic->rx_queue_cnt = 0;
                PMD_DRV_LOG(DEBUG, "Free VNIC\n");
        }
1522
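        /* Validation must leave no residue in the firmware: undo
         * whichever filter type the parser programmed above.
         */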
1523         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1524                 bnxt_hwrm_clear_em_filter(bp, filter);
1525         else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1526                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
1527         else
1528                 bnxt_hwrm_clear_l2_filter(bp, filter);
1529
1530 exit:
1531         /* No need to hold on to this filter if we are just validating flow */
1532         bnxt_free_filter(bp, filter);
1533         bnxt_release_flow_lock(bp);
1534
1535         return ret;
1536 }
1537
1538 static void
1539 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1540                    struct bnxt_filter_info *new_filter)
1541 {
1542         /* Clear the new L2 filter that was created in the previous step in
1543          * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1544          * filter which points to the new destination queue and so we clear
1545          * the previous L2 filter. For ntuple filters, we are going to reuse
1546          * the old L2 filter and create new NTUPLE filter with this new
1547          * destination queue subsequently during bnxt_flow_create. So we
1548          * decrement the ref cnt of the L2 filter that would've been bumped
1549          * up previously in bnxt_validate_and_parse_flow as the old n-tuple
1550          * filter that was referencing it will be deleted now.
1551          */
1552         bnxt_hwrm_clear_l2_filter(bp, old_filter);
1553         if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1554                 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1555         } else {
1556                 if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1557                         bnxt_hwrm_clear_em_filter(bp, old_filter);
1558                 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1559                         bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1560         }
1561 }
1562
1563 static int
1564 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1565 {
1566         struct bnxt_filter_info *mf;
1567         struct rte_flow *flow;
1568         int i;
1569
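        /* Walk every VNIC's flow list and compare all match fields.
         * Same pattern and destination means a duplicate (-EEXIST);
         * same pattern with a new destination updates the existing
         * flow in place (-EXDEV).
         */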
1570         for (i = bp->max_vnics - 1; i >= 0; i--) {
1571                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1572
1573                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1574                         continue;
1575
1576                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1577                         mf = flow->filter;
1578
1579                         if (mf->filter_type == nf->filter_type &&
1580                             mf->flags == nf->flags &&
1581                             mf->src_port == nf->src_port &&
1582                             mf->src_port_mask == nf->src_port_mask &&
1583                             mf->dst_port == nf->dst_port &&
1584                             mf->dst_port_mask == nf->dst_port_mask &&
1585                             mf->ip_protocol == nf->ip_protocol &&
1586                             mf->ip_addr_type == nf->ip_addr_type &&
1587                             mf->ethertype == nf->ethertype &&
1588                             mf->vni == nf->vni &&
1589                             mf->tunnel_type == nf->tunnel_type &&
1590                             mf->l2_ovlan == nf->l2_ovlan &&
1591                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1592                             mf->l2_ivlan == nf->l2_ivlan &&
1593                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1594                             !memcmp(mf->l2_addr, nf->l2_addr,
1595                                     RTE_ETHER_ADDR_LEN) &&
1596                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1597                                     RTE_ETHER_ADDR_LEN) &&
1598                             !memcmp(mf->src_macaddr, nf->src_macaddr,
1599                                     RTE_ETHER_ADDR_LEN) &&
1600                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1601                                     RTE_ETHER_ADDR_LEN) &&
1602                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1603                                     sizeof(nf->src_ipaddr)) &&
1604                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1605                                     sizeof(nf->src_ipaddr_mask)) &&
1606                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1607                                     sizeof(nf->dst_ipaddr)) &&
1608                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1609                                     sizeof(nf->dst_ipaddr_mask))) {
1610                                 if (mf->dst_id == nf->dst_id)
1611                                         return -EEXIST;
1612                                 /* Free the old filter, update flow
1613                                  * with new filter
1614                                  */
1615                                 bnxt_update_filter(bp, mf, nf);
1616                                 STAILQ_REMOVE(&vnic->filter, mf,
1617                                               bnxt_filter_info, next);
1618                                 STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
1619                                 bnxt_free_filter(bp, mf);
1620                                 flow->filter = nf;
1621                                 return -EXDEV;
1622                         }
1623                 }
1624         }
1625         return 0;
1626 }
1627
1628 static void
1629 bnxt_setup_flow_counter(struct bnxt *bp)
1630 {
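        /* Arm the periodic flow-counter alarm only once, and only if
         * the firmware advertises advanced flow counters and flow
         * xstats are enabled.
         */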
1631         if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
1632             !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
1633                 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
1634                                   bnxt_flow_cnt_alarm_cb,
1635                                   (void *)bp);
1636                 bp->flags |= BNXT_FLAG_FC_THREAD;
1637         }
1638 }
1639
1640 void bnxt_flow_cnt_alarm_cb(void *arg)
1641 {
1642         int rc = 0;
1643         struct bnxt *bp = arg;
1644
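        /* Stop polling when the output table is gone, no flows are
         * left, or the port has been stopped; otherwise fetch the
         * stats and re-arm the alarm.
         */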
1645         if (!bp->flow_stat->rx_fc_out_tbl.va) {
1646                 PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n");
1647                 bnxt_cancel_fc_thread(bp);
1648                 return;
1649         }
1650
1651         if (!bp->flow_stat->flow_count) {
1652                 bnxt_cancel_fc_thread(bp);
1653                 return;
1654         }
1655
1656         if (!bp->eth_dev->data->dev_started) {
1657                 bnxt_cancel_fc_thread(bp);
1658                 return;
1659         }
1660
1661         rc = bnxt_flow_stats_req(bp);
1662         if (rc) {
1663                 PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");
1664                 return;
1665         }
1666
1667         rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
1668                           bnxt_flow_cnt_alarm_cb,
1669                           (void *)bp);
1670 }
1671
1673 static struct rte_flow *
1674 bnxt_flow_create(struct rte_eth_dev *dev,
1675                  const struct rte_flow_attr *attr,
1676                  const struct rte_flow_item pattern[],
1677                  const struct rte_flow_action actions[],
1678                  struct rte_flow_error *error)
1679 {
1680         struct bnxt *bp = dev->data->dev_private;
1681         struct bnxt_vnic_info *vnic = NULL;
1682         struct bnxt_filter_info *filter;
1683         bool update_flow = false;
1684         struct rte_flow *flow;
1685         int ret = 0;
1686         uint32_t tun_type, flow_id;
1687
1688         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1689                 rte_flow_error_set(error, EINVAL,
1690                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow, not a trusted VF!");
1692                 return NULL;
1693         }
1694
1695         if (!dev->data->dev_started) {
1696                 rte_flow_error_set(error,
1697                                    EINVAL,
1698                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1699                                    NULL,
1700                                    "Device must be started");
1701                 return NULL;
1702         }
1703
1704         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1705         if (!flow) {
1706                 rte_flow_error_set(error, ENOMEM,
1707                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1708                                    "Failed to allocate memory");
1709                 return flow;
1710         }
1711
1712         bnxt_acquire_flow_lock(bp);
1713         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1714         if (ret != 0) {
                PMD_DRV_LOG(ERR, "Flow arguments are not valid.\n");
1716                 goto free_flow;
1717         }
1718
1719         filter = bnxt_get_unused_filter(bp);
1720         if (filter == NULL) {
1721                 rte_flow_error_set(error, ENOSPC,
1722                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1723                                    "Not enough resources for a new flow");
1724                 goto free_flow;
1725         }
1726
1727         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1728                                            error, filter);
1729         if (ret != 0)
1730                 goto free_filter;
1731
1732         ret = bnxt_match_filter(bp, filter);
1733         if (ret == -EEXIST) {
1734                 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1735                 /* Clear the filter that was created as part of
1736                  * validate_and_parse_flow() above
1737                  */
1738                 bnxt_hwrm_clear_l2_filter(bp, filter);
1739                 goto free_filter;
1740         } else if (ret == -EXDEV) {
1741                 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1742                 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1743                 update_flow = true;
1744         }
1745
        /* If tunnel redirection to a VF/PF is specified then only
         * tunnel_type is set, and "enables" equals the tunnel type.
         * Issue the HWRM cmd directly in that case.
         */
1750         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1751             filter->enables == filter->tunnel_type) {
1752                 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1753                 if (ret) {
1754                         rte_flow_error_set(error, -ret,
1755                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1756                                            "Unable to query tunnel to VF");
1757                         goto free_filter;
1758                 }
1759                 if (tun_type == (1U << filter->tunnel_type)) {
1760                         ret =
1761                         bnxt_hwrm_tunnel_redirect_free(bp,
1762                                                        filter->tunnel_type);
1763                         if (ret) {
1764                                 PMD_DRV_LOG(ERR,
1765                                             "Unable to free existing tunnel\n");
1766                                 rte_flow_error_set(error, -ret,
1767                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1768                                                    NULL,
1769                                                    "Unable to free preexisting "
1770                                                    "tunnel on VF");
1771                                 goto free_filter;
1772                         }
1773                 }
1774                 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1775                 if (ret) {
1776                         rte_flow_error_set(error, -ret,
1777                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1778                                            "Unable to redirect tunnel to VF");
1779                         goto free_filter;
1780                 }
1781                 vnic = &bp->vnic_info[0];
1782                 goto done;
1783         }
1784
1785         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1786                 filter->enables |=
1787                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1788                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1789                 if (ret != 0) {
1790                         rte_flow_error_set(error, -ret,
1791                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1792                                            "Failed to create EM filter");
1793                         goto free_filter;
1794                 }
1795         }
1796
1797         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1798                 filter->enables |=
1799                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1800                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1801                 if (ret != 0) {
1802                         rte_flow_error_set(error, -ret,
1803                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1804                                            "Failed to create ntuple filter");
1805                         goto free_filter;
1806                 }
1807         }
1808
1809         vnic = find_matching_vnic(bp, filter);
1810 done:
1811         if (!ret || update_flow) {
1812                 flow->filter = filter;
1813                 flow->vnic = vnic;
1814                 if (update_flow) {
1815                         ret = -EXDEV;
1816                         goto free_flow;
1817                 }
1818
1819                 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1820                         PMD_DRV_LOG(DEBUG,
1821                                     "Mark action: mark id 0x%x, flow id 0x%x\n",
1822                                     filter->mark, filter->flow_id);
1823
                        /* Flow IDs from the TCAM and EM lookups are
                         * 16-bit values; other modes are not supported,
                         * so mask before indexing the mark table.
                         */
                        flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1828                         if (bp->mark_table[flow_id].valid) {
1829                                 rte_flow_error_set(error, EEXIST,
1830                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1831                                                    NULL,
1832                                                    "Flow with mark id exists");
1833                                 bnxt_clear_one_vnic_filter(bp, filter);
1834                                 goto free_filter;
1835                         }
1836                         bp->mark_table[flow_id].valid = true;
1837                         bp->mark_table[flow_id].mark_id = filter->mark;
1838                 }
1839
1840                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1841                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1842
1843                 if (BNXT_FLOW_XSTATS_EN(bp))
1844                         bp->flow_stat->flow_count++;
1845                 bnxt_release_flow_lock(bp);
1846                 bnxt_setup_flow_counter(bp);
1847                 PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1848                 return flow;
1849         }
1850
1851 free_filter:
1852         bnxt_free_filter(bp, filter);
1853 free_flow:
1854         if (ret == -EEXIST)
1855                 rte_flow_error_set(error, ret,
1856                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1857                                    "Matching Flow exists.");
1858         else if (ret == -EXDEV)
1859                 rte_flow_error_set(error, 0,
1860                                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
1861                                    "Flow with pattern exists, updating destination queue");
1862         else if (!rte_errno)
1863                 rte_flow_error_set(error, -ret,
1864                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1865                                    "Failed to create flow.");
1866         rte_free(flow);
1867         flow = NULL;
1868         bnxt_release_flow_lock(bp);
1869         return flow;
1870 }
1871
1872 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1873                                                struct bnxt_filter_info *filter,
1874                                                struct rte_flow_error *error)
1875 {
1876         uint16_t tun_dst_fid;
1877         uint32_t tun_type;
1878         int ret = 0;
1879
1880         ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1881         if (ret) {
1882                 rte_flow_error_set(error, -ret,
1883                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1884                                    "Unable to query tunnel to VF");
1885                 return ret;
1886         }
1887         if (tun_type == (1U << filter->tunnel_type)) {
1888                 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1889                                                      &tun_dst_fid);
1890                 if (ret) {
1891                         rte_flow_error_set(error, -ret,
1892                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1893                                            NULL,
1894                                            "tunnel_redirect info cmd fail");
1895                         return ret;
1896                 }
                PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x fw_fid = %x\n",
1898                             tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1899
1900                 /* Tunnel doesn't belong to this VF, so don't send HWRM
1901                  * cmd, just delete the flow from driver
1902                  */
1903                 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1904                         PMD_DRV_LOG(ERR,
1905                                     "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1906                 else
1907                         ret = bnxt_hwrm_tunnel_redirect_free(bp,
1908                                                         filter->tunnel_type);
1909         }
1910         return ret;
1911 }
1912
1913 static int
1914 _bnxt_flow_destroy(struct bnxt *bp,
1915                    struct rte_flow *flow,
1916                     struct rte_flow_error *error)
1917 {
1918         struct bnxt_filter_info *filter;
1919         struct bnxt_vnic_info *vnic;
1920         int ret = 0;
1921         uint32_t flow_id;
1922
1923         filter = flow->filter;
1924         vnic = flow->vnic;
1925
1926         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1927             filter->enables == filter->tunnel_type) {
1928                 ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
                if (ret)
                        return ret;
                goto done;
1933         }
1934
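        /* A zero return means no identical flow was found on any
         * VNIC; log it, but still tear down this flow's resources.
         */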
1935         ret = bnxt_match_filter(bp, filter);
1936         if (ret == 0)
1937                 PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1938
1939         if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1940                 flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1941                 memset(&bp->mark_table[flow_id], 0,
1942                        sizeof(bp->mark_table[flow_id]));
1943                 filter->flow_id = 0;
1944         }
1945
1946         ret = bnxt_clear_one_vnic_filter(bp, filter);
1947
1948 done:
1949         if (!ret) {
1950                 /* If it is a L2 drop filter, when the filter is created,
1951                  * the FW updates the BC/MC records.
1952                  * Once this filter is removed, issue the set_rx_mask command
1953                  * to reset the BC/MC records in the HW to the settings
1954                  * before the drop counter is created.
1955                  */
1956                 if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
1957                         bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);
1958
1959                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
1960                 bnxt_free_filter(bp, filter);
1961                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1962                 rte_free(flow);
1963                 if (BNXT_FLOW_XSTATS_EN(bp))
1964                         bp->flow_stat->flow_count--;
1965
1966                 /* If this was the last flow associated with this vnic,
1967                  * switch the queue back to RSS pool.
1968                  */
1969                 if (vnic && !vnic->func_default &&
1970                     STAILQ_EMPTY(&vnic->flow_list)) {
1971                         rte_free(vnic->fw_grp_ids);
1972                         if (vnic->rx_queue_cnt > 1)
1973                                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1974
1975                         bnxt_hwrm_vnic_free(bp, vnic);
1976                         vnic->rx_queue_cnt = 0;
1977                 }
1978         } else {
1979                 rte_flow_error_set(error, -ret,
1980                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1981                                    "Failed to destroy flow.");
1982         }
1983
1984         return ret;
1985 }
1986
1987 static int
1988 bnxt_flow_destroy(struct rte_eth_dev *dev,
1989                   struct rte_flow *flow,
1990                   struct rte_flow_error *error)
1991 {
1992         struct bnxt *bp = dev->data->dev_private;
1993         int ret = 0;
1994
1995         bnxt_acquire_flow_lock(bp);
1996         if (!flow) {
1997                 rte_flow_error_set(error, EINVAL,
1998                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1999                                    "Invalid flow: failed to destroy flow.");
2000                 bnxt_release_flow_lock(bp);
2001                 return -EINVAL;
2002         }
2003
2004         if (!flow->filter) {
2005                 rte_flow_error_set(error, EINVAL,
2006                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2007                                    "Invalid flow: failed to destroy flow.");
2008                 bnxt_release_flow_lock(bp);
2009                 return -EINVAL;
2010         }
2011         ret = _bnxt_flow_destroy(bp, flow, error);
2012         bnxt_release_flow_lock(bp);
2013
2014         return ret;
2015 }
2016
2017 void bnxt_cancel_fc_thread(struct bnxt *bp)
2018 {
2019         bp->flags &= ~BNXT_FLAG_FC_THREAD;
2020         rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
2021 }
2022
2023 static int
2024 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2025 {
2026         struct bnxt *bp = dev->data->dev_private;
2027         struct bnxt_vnic_info *vnic;
2028         struct rte_flow *flow;
2029         unsigned int i;
2030         int ret = 0;
2031
2032         bnxt_acquire_flow_lock(bp);
2033         for (i = 0; i < bp->max_vnics; i++) {
2034                 vnic = &bp->vnic_info[i];
                if (vnic->fw_vnic_id == INVALID_VNIC_ID)
2036                         continue;
2037
2038                 while (!STAILQ_EMPTY(&vnic->flow_list)) {
2039                         flow = STAILQ_FIRST(&vnic->flow_list);
2040
                        /* "continue" here would spin forever on the
                         * same list head, since nothing removes the
                         * entry; stop flushing this VNIC instead.
                         */
                        if (!flow->filter)
                                break;
2043
2044                         ret = _bnxt_flow_destroy(bp, flow, error);
2045                         if (ret)
2046                                 break;
2047                 }
2048         }
2049
2050         bnxt_cancel_fc_thread(bp);
2051         bnxt_release_flow_lock(bp);
2052
2053         return ret;
2054 }
2055
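/* Usage sketch (application side, not driver code): a minimal, hedged
 * example of exercising these callbacks through the generic rte_flow
 * API. The port id and queue index below are placeholder assumptions.
 *
 *        uint16_t port_id = 0;
 *        struct rte_flow_error err;
 *        struct rte_flow_attr attr = { .ingress = 1 };
 *        struct rte_flow_item_eth eth_spec = { 0 }, eth_mask = { 0 };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                  .spec = &eth_spec, .mask = &eth_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *        struct rte_flow *flow = NULL;
 *
 *        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *                flow = rte_flow_create(port_id, &attr, pattern, actions,
 *                                       &err);
 */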
2056 const struct rte_flow_ops bnxt_flow_ops = {
2057         .validate = bnxt_flow_validate,
2058         .create = bnxt_flow_create,
2059         .destroy = bnxt_flow_destroy,
2060         .flush = bnxt_flow_flush,
2061 };