net/bnxt: modify VNIC accounting
[dpdk.git] drivers/net/bnxt/bnxt_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

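/* Validate that the attribute, pattern, and action arrays passed to the
 * rte_flow ops are all non-NULL before any parsing is attempted.
 */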
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

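/* Skip VOID entries and return the first meaningful item/action.
 * The pattern and action arrays are END-terminated, so these loops
 * always terminate (END is returned if nothing else is found).
 */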
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

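/* Walk the pattern to decide which HW filter type to use: returns 1 if
 * the flow needs an ntuple (5-tuple) filter, 0 if an L2/exact-match
 * filter suffices, or a negative errno for an invalid combination
 * (VLAN matching cannot be combined with ntuple filtering).
 */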
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;
        bool has_vlan = false;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        has_vlan = true;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* need ntuple match, reset exact match */
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
                        use_ntuple |= 0;
                }
                item++;
        }

        if (has_vlan && use_ntuple) {
                PMD_DRV_LOG(ERR,
                            "VLAN flow cannot use NTUPLE filter\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Cannot use VLAN with NTUPLE");
                return -rte_errno;
        }

        return use_ntuple;
}

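/* Translate the rte_flow pattern into a bnxt_filter_info: picks ntuple
 * vs. exact-match filtering, copies the matched header fields and masks
 * into the filter, and accumulates the HWRM "enables" bits as it goes.
 */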
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_ether_addr *dst, *src;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0, valid_flags = 0;
        bool vni_masked = false;
        bool tni_masked = false;
        uint32_t en_ethertype;
        uint8_t inner = 0;
        uint32_t vf = 0;
        uint32_t en = 0;
        int use_ntuple;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        if (use_ntuple < 0)
                return use_ntuple;
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                        inner = ((const struct rte_flow_item_any *)
                                 item->spec)->num > 3;
                        if (inner)
                                PMD_DRV_LOG(DEBUG, "Parse inner header\n");
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        if (!item->spec || !item->mask)
                                break;

                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must
                         * not be partially set: each must be either all
                         * 0's or all 1's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* A partial ethertype mask is not allowed; only
                         * exact matches are.
                         */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                dst = &eth_spec->dst;
                                if (!rte_is_valid_assigned_ether_addr(dst)) {
                                        rte_flow_error_set(error,
                                                           EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "DMAC is invalid");
                                        PMD_DRV_LOG(ERR,
                                                    "DMAC is invalid!\n");
                                        return -rte_errno;
                                }
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
                                        BNXT_FLOW_L2_DST_VALID_FLAG;
                                filter->priority = attr->priority;
                                PMD_DRV_LOG(DEBUG,
                                            "Creating a priority flow\n");
                        }
                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                src = &eth_spec->src;
                                if (!rte_is_valid_assigned_ether_addr(src)) {
                                        rte_flow_error_set(error,
                                                           EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "SMAC is invalid");
                                        PMD_DRV_LOG(ERR,
                                                    "SMAC is invalid!\n");
                                        return -rte_errno;
                                }
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
                                        BNXT_FLOW_L2_SRC_VALID_FLAG;
                        }
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }
                        if (inner)
                                valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (!vxlan_spec && !vxlan_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                                break;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Both spec and mask are non-NULL here.
                         * The VNI must be fully masked (all ones).
                         */
                        vni_masked = !!memcmp(vxlan_mask->vni, vni_mask,
                                              RTE_DIM(vni_mask));
                        if (vni_masked) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                return -rte_errno;
                        }

                        rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                   vxlan_spec->vni, 3);
                        filter->vni = rte_be_to_cpu_32(tenant_id_be);
                        filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (!nvgre_spec && !nvgre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                                break;
                        }

                        /* These item fields are big-endian. */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* Both spec and mask are non-NULL here.
                         * The TNI must be fully masked (all ones).
                         */
                        tni_masked = !!memcmp(nvgre_mask->tni, tni_mask,
                                              RTE_DIM(tni_mask));
                        if (tni_masked) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TNI mask");
                                return -rte_errno;
                        }

                        rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                   nvgre_spec->tni, 3);
                        filter->vni = rte_be_to_cpu_32(tenant_id_be);
                        filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = (const struct rte_flow_item_gre *)item->spec;
                        gre_mask = (const struct rte_flow_item_gre *)item->mask;

                        /* Check if GRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if (!!gre_spec ^ !!gre_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GRE item");
                                return -rte_errno;
                        }

                        if (!gre_spec && !gre_mask)
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
                        break;

                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value means the VF's default
                                 * VNIC is not available, e.g. because no
                                 * driver is loaded on the VF, so the flow
                                 * cannot be applied.
                                 */
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;
        filter->valid_flags = valid_flags;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        return 0;
}

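/* Search every VNIC's filter list for an L2 filter matching the new
 * flow's L2 fields so it can be reused instead of allocating a new one.
 * The port's default L2 filter (f0) is matched on destination MAC first.
 */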
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf, *f0;
        struct bnxt_vnic_info *vnic0;
        int i;

        vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        for (i = bp->max_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                if (vnic->fw_vnic_id == INVALID_VNIC_ID)
                        continue;

                STAILQ_FOREACH(mf, &vnic->filter, next) {
                        if (mf->matching_l2_fltr_ptr)
                                continue;

                        if (mf->ethertype == nf->ethertype &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    RTE_ETHER_ADDR_LEN))
                                return mf;
                }
        }
        return NULL;
}

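/* Allocate and program a new L2 filter for this flow via HWRM. The
 * filter direction, source/destination MAC, priority hint, and drop
 * semantics are derived from the parsed flow's valid_flags.
 */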
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                      struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1;
        int rc;

        /* Allocate a new L2 filter: this flow needs a MAC filter that
         * does not match any existing L2 filter.
         */
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        memcpy(filter1, nf, sizeof(*filter1));

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
        filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
            nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
        }

        if (nf->filter_type == HWRM_CFA_L2_FILTER &&
            (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
                memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
        } else {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
                memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        }

        if (nf->priority &&
            (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
                /* Tell the FW where to place the filter in the table. */
                if (nf->priority > 65535) {
                        filter1->pri_hint =
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
                        /* This will place the filter in TCAM */
                        filter1->l2_filter_id_hint = (uint64_t)-1;
                }
        }

        if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
                               BNXT_FLOW_L2_SRC_VALID_FLAG |
                               BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
                               BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
                filter1->enables =
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
                memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        }

        if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
                if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
                        /* The number of VLANs for a drop filter should be
                         * zero. Since the request is memset to 0, the
                         * count is automatically set to 0.
                         */
                        if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
                                filter1->enables |=
                                        L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
                        } else {
                                filter1->enables |=
                                        L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
                                filter1->flags |=
                                HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                        }
                }
        }

        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

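/* Return the L2 filter to use for a flow: reuse a matching existing
 * filter (bumping its reference count) or create a new one and attach
 * it to the given VNIC. Callers pass the default VNIC for ntuple flows
 * and the destination VNIC otherwise (see bnxt_validate_and_parse_flow()
 * below).
 */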
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *l2_filter = NULL;

        l2_filter = bnxt_find_matching_l2_filter(bp, nf);
        if (l2_filter) {
                l2_filter->l2_ref_cnt++;
        } else {
                l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
                if (l2_filter) {
                        STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
                        l2_filter->vnic = vnic;
                }
        }
        nf->matching_l2_fltr_ptr = l2_filter;

        return l2_filter;
}

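/* Prepare a VNIC for use by a flow: allocate the ring group and the
 * VNIC in FW, set up an RSS context when more than one Rx ring is
 * involved, and apply the VLAN-strip and placement configuration.
 * Note this increments bp->nr_vnics, the driver's VNIC accounting.
 */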
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto ret;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
                goto ret;
        }
        bp->nr_vnics++;

        /* RSS context is required only when there is more than one RSS ring */
        if (vnic->rx_queue_cnt > 1) {
                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic ctx alloc failure: %x\n", rc);
                        goto ret;
                }
        } else {
                PMD_DRV_LOG(DEBUG, "No RSS context required\n");
        }

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto ret;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

ret:
        return rc;
}

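/* Check whether an existing VNIC's RSS configuration matches the queue
 * set requested by an RSS action: same queue count, and every requested
 * queue's ring group must be present in the VNIC's group table.
 */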
static int match_vnic_rss_cfg(struct bnxt *bp,
                              struct bnxt_vnic_info *vnic,
                              const struct rte_flow_action_rss *rss)
{
        unsigned int match = 0, i;

        if (vnic->rx_queue_cnt != rss->queue_num)
                return -EINVAL;

        for (i = 0; i < rss->queue_num; i++) {
                if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
                    !bp->rx_queues[rss->queue[i]]->rx_started)
                        return -EINVAL;
        }

        for (i = 0; i < vnic->rx_queue_cnt; i++) {
                int j;

                for (j = 0; j < vnic->rx_queue_cnt; j++) {
                        if (bp->grp_info[rss->queue[i]].fw_grp_id ==
                            vnic->fw_grp_ids[j])
                                match++;
                }
        }

        if (match != vnic->rx_queue_cnt) {
                PMD_DRV_LOG(ERR,
                            "VNIC queue count %d vs queues matched %d\n",
                            vnic->rx_queue_cnt, match);
                return -EINVAL;
        }

        return 0;
}

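/* Propagate the allocated L2 filter's identifiers into the flow's
 * filter. For plain L2 flows (no valid flags beyond the L2/drop/inner
 * ones) the flow filter itself is demoted to an L2 filter.
 */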
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
                            struct bnxt_filter_info *filter1,
                            int use_ntuple)
{
        if (!use_ntuple &&
            !(filter->valid_flags &
              ~(BNXT_FLOW_L2_DST_VALID_FLAG |
                BNXT_FLOW_L2_SRC_VALID_FLAG |
                BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
                BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
                BNXT_FLOW_L2_DROP_FLAG |
                BNXT_FLOW_PARSE_INNER_FLAG))) {
                filter->flags = filter1->flags;
                filter->enables = filter1->enables;
                filter->filter_type = HWRM_CFA_L2_FILTER;
                memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
                memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
                filter->pri_hint = filter1->pri_hint;
                filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
        }
        filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
        filter->l2_ref_cnt = filter1->l2_ref_cnt;
        filter->flow_id = filter1->flow_id;
        PMD_DRV_LOG(DEBUG,
                "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
                filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}

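/* Top-level parse: validate the pattern and attributes, then walk the
 * action list and resolve the destination (queue/VNIC, drop, count,
 * VF redirect, ...) into the filter's flags and dst_id.
 */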
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_filter_info *filter1 = NULL;
        const struct rte_flow_action_rss *rss;
        struct bnxt_rx_queue *rxq = NULL;
        int dflt_vnic, vnic_id;
        unsigned int rss_idx;
        uint32_t vf = 0, i;
        int rc, use_ntuple;

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Only the ingress attribute is supported right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        use_ntuple = bnxt_filter_type_check(pattern, error);

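        /* Actions are handled one at a time; after each action is
         * resolved, the function advances to the next non-void action
         * and re-enters at this label (via a goto beyond this excerpt).
         */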
start:
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
                        filter->dst_id = act_q->index;
                        goto skip_vnic_alloc;
                }

                vnic_id = attr->group;
                if (!vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Group id is 0\n");
                        vnic_id = act_q->index;
                }

                BNXT_VALID_VNIC_OR_RET(bp, vnic_id);

                vnic = &bp->vnic_info[vnic_id];
                if (vnic->rx_queue_cnt) {
                        if (vnic->start_grp_id != act_q->index) {
                                PMD_DRV_LOG(ERR,
                                            "VNIC already in use\n");
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   act,
                                                   "VNIC already in use");
                                rc = -rte_errno;
                                goto ret;
                        }
                        goto use_vnic;
                }

                rxq = bp->rx_queues[act_q->index];

                if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
                    vnic->fw_vnic_id != INVALID_HW_RING_ID)
                        goto use_vnic;

                if (!rxq) {
                        PMD_DRV_LOG(ERR,
                                    "Queue invalid or used with other VNIC\n");
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Queue invalid or in use");
                        rc = -rte_errno;
                        goto ret;
                }

                rxq->vnic = vnic;
                rxq->rx_started = 1;
                vnic->rx_queue_cnt++;
                vnic->start_grp_id = act_q->index;
                vnic->end_grp_id = act_q->index;
                vnic->func_default = 0; /* This is not a default VNIC. */

                PMD_DRV_LOG(DEBUG, "VNIC found\n");

                rc = bnxt_vnic_prep(bp, vnic);
                if (rc) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "VNIC prep fail");
                        rc = -rte_errno;
                        goto ret;
                }

                PMD_DRV_LOG(DEBUG,
                            "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
                            act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
                vnic->ff_pool_idx = vnic_id;
                PMD_DRV_LOG(DEBUG,
                            "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
                filter->dst_id = vnic->fw_vnic_id;
skip_vnic_alloc:
                /* For an ntuple filter, create the L2 filter with the
                 * default VNIC. The user-specified redirect queue will be
                 * set while creating the ntuple filter in hardware.
                 */
                vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
                if (use_ntuple)
                        filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                else
                        filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rte_flow_error_set(error,
                                           ENOSPC,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Filter not available");
                        rc = -rte_errno;
                        goto ret;
                }

                PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
                            filter, filter1, filter1->l2_ref_cnt);
                bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
                break;
1161         case RTE_FLOW_ACTION_TYPE_DROP:
1162                 vnic0 = &bp->vnic_info[0];
1163                 filter->dst_id = vnic0->fw_vnic_id;
1164                 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
1165                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1166                 if (filter1 == NULL) {
1167                         rte_flow_error_set(error,
1168                                            ENOSPC,
1169                                            RTE_FLOW_ERROR_TYPE_ACTION,
1170                                            act,
1171                                            "Filter not available");
1172                         rc = -rte_errno;
1173                         goto ret;
1174                 }
1175
1176                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1177                         filter->flags =
1178                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1179                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1180                         filter->flags =
1181                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1182
1183                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1184                 break;
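        /*
         * Caller-side sketch (illustrative): a drop rule carries no
         * queue; as coded above, the driver anchors it to the default
         * VNIC and sets the EM/ntuple DROP flag instead.
         *
         *	struct rte_flow_action acts[] = {
         *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
         *		{ .type = RTE_FLOW_ACTION_TYPE_END },
         *	};
         */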
1185         case RTE_FLOW_ACTION_TYPE_COUNT:
1186                 vnic0 = &bp->vnic_info[0];
1187                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1188                 if (filter1 == NULL) {
1189                         rte_flow_error_set(error,
1190                                            ENOSPC,
1191                                            RTE_FLOW_ERROR_TYPE_ACTION,
1192                                            act,
1193                                            "New filter not available");
1194                         rc = -rte_errno;
1195                         goto ret;
1196                 }
1197
1198                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1199                 filter->flow_id = filter1->flow_id;
1200                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1201                 break;
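        /*
         * Hedged caller-side sketch: with a COUNT action attached, hit
         * statistics are read back through rte_flow_query(); whether
         * counters are usable depends on firmware support (see
         * BNXT_FW_CAP_ADV_FLOW_COUNTERS below). port_id, flow and error
         * are placeholders the caller provides.
         *
         *	struct rte_flow_query_count cnt = { .reset = 0 };
         *	struct rte_flow_action count_act = {
         *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
         *	};
         *	rte_flow_query(port_id, flow, &count_act, &cnt, &error);
         */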
1202         case RTE_FLOW_ACTION_TYPE_VF:
1203                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1204                 vf = act_vf->id;
1205
1206                 if (filter->tunnel_type ==
1207                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1208                     filter->tunnel_type ==
1209                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
1210                         /* If issued on a VF, ensure id is 0 and is trusted */
1211                         if (BNXT_VF(bp)) {
1212                                 if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1213                                         rte_flow_error_set(error, EINVAL,
1214                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1215                                                 act,
1216                                                 "Incorrect VF");
1217                                         rc = -rte_errno;
1218                                         goto ret;
1219                                 }
1220                         }
1221
1222                         filter->enables |= filter->tunnel_type;
1223                         filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1224                         goto done;
1225                 }
1226
1227                 if (vf >= bp->pdev->max_vfs) {
1228                         rte_flow_error_set(error,
1229                                            EINVAL,
1230                                            RTE_FLOW_ERROR_TYPE_ACTION,
1231                                            act,
1232                                            "Incorrect VF id!");
1233                         rc = -rte_errno;
1234                         goto ret;
1235                 }
1236
1237                 dflt_vnic =
1238                         bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
1239                 if (dflt_vnic < 0) {
1240                         /* A negative value means no driver is loaded on
1241                          * the VF, so there is no default VNIC to mirror to.
1242                          */
1243                         rte_flow_error_set(error,
1244                                            EINVAL,
1245                                            RTE_FLOW_ERROR_TYPE_ACTION,
1246                                            act,
1247                                            "Unable to get default VNIC for VF");
1248                         rc = -rte_errno;
1249                         goto ret;
1250                 }
1251
1252                 filter->mirror_vnic_id = dflt_vnic;
1253                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1254
1255                 vnic0 = &bp->vnic_info[0];
1256                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1257                 if (filter1 == NULL) {
1258                         rte_flow_error_set(error,
1259                                            ENOSPC,
1260                                            RTE_FLOW_ERROR_TYPE_ACTION,
1261                                            act,
1262                                            "New filter not available");
1263                         rc = -rte_errno;
1264                         goto ret;
1265                 }
1266
1267                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1268                 filter->flow_id = filter1->flow_id;
1269                 break;
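        /*
         * Caller-side sketch (illustrative): redirecting matched traffic
         * to VF 2. As handled above, VXLAN/IP-in-GRE tunnel types take
         * the tunnel-redirect path; everything else programs the VF's
         * default VNIC as the mirror destination.
         *
         *	struct rte_flow_action_vf vf_act = { .id = 2 };
         *	struct rte_flow_action acts[] = {
         *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf_act },
         *		{ .type = RTE_FLOW_ACTION_TYPE_END },
         *	};
         */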
1270         case RTE_FLOW_ACTION_TYPE_RSS:
1271                 rss = (const struct rte_flow_action_rss *)act->conf;
1272
1273                 vnic_id = attr->group;
1274
1275                 BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1276                 vnic = &bp->vnic_info[vnic_id];
1277
1278                 /* Check if requested RSS config matches RSS config of VNIC
1279                  * only if it is not a fresh VNIC configuration.
1280                  * Otherwise the existing VNIC configuration can be used.
1281                  */
1282                 if (vnic->rx_queue_cnt) {
1283                         rc = match_vnic_rss_cfg(bp, vnic, rss);
1284                         if (rc) {
1285                                 PMD_DRV_LOG(ERR,
1286                                             "VNIC and RSS config mismatch\n");
1287                                 rte_flow_error_set(error,
1288                                                    EINVAL,
1289                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1290                                                    act,
1291                                                    "VNIC and RSS cfg mismatch");
1292                                 rc = -rte_errno;
1293                                 goto ret;
1294                         }
1295                         goto vnic_found;
1296                 }
1297
1298                 for (i = 0; i < rss->queue_num; i++) {
1299                         PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1300                                     rss->queue[i]);
1301
1302                         if (!rss->queue[i] ||
1303                             rss->queue[i] >= bp->rx_nr_rings ||
1304                             !bp->rx_queues[rss->queue[i]]) {
1305                                 rte_flow_error_set(error,
1306                                                    EINVAL,
1307                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1308                                                    act,
1309                                                    "Invalid queue ID for RSS");
1310                                 rc = -rte_errno;
1311                                 goto ret;
1312                         }
1313                         rxq = bp->rx_queues[rss->queue[i]];
1314
1315                         if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1316                             INVALID_HW_RING_ID) {
1317                                 PMD_DRV_LOG(ERR,
1318                                             "queue active with other VNIC\n");
1319                                 rte_flow_error_set(error,
1320                                                    EINVAL,
1321                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1322                                                    act,
1323                                                    "Queue in use by another VNIC");
1324                                 rc = -rte_errno;
1325                                 goto ret;
1326                         }
1327
1328                         rxq->vnic = vnic;
1329                         rxq->rx_started = 1;
1330                         vnic->rx_queue_cnt++;
1331                 }
1332
1333                 vnic->start_grp_id = rss->queue[0];
1334                 vnic->end_grp_id = rss->queue[rss->queue_num - 1];
1335                 vnic->func_default = 0; /* This is not a default VNIC. */
1336
1337                 rc = bnxt_vnic_prep(bp, vnic);
1338                 if (rc) {
1339                         rte_flow_error_set(error,
1340                                            EINVAL,
1341                                            RTE_FLOW_ERROR_TYPE_ACTION,
1342                                            act,
1343                                            "VNIC prep fail");
1344                         rc = -rte_errno;
1345                         goto ret;
1346                 }
1347
1348                 PMD_DRV_LOG(DEBUG,
1349                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1350                             vnic_id, vnic, vnic->fw_grp_ids);
1351
1352                 vnic->ff_pool_idx = vnic_id;
1353                 PMD_DRV_LOG(DEBUG,
1354                             "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1355
1356                 /* This can be done only after vnic_grp_alloc is done. */
1357                 for (i = 0; i < vnic->rx_queue_cnt; i++) {
1358                         vnic->fw_grp_ids[i] =
1359                                 bp->grp_info[rss->queue[i]].fw_grp_id;
1360                         /* Make sure vnic0 does not use these rings. */
1361                         bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1362                                 INVALID_HW_RING_ID;
1363                 }
1364
1365                 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
1366                         for (i = 0; i < vnic->rx_queue_cnt; i++)
1367                                 vnic->rss_table[rss_idx++] =
1368                                         vnic->fw_grp_ids[i];
1369                 }
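                /* The indirection table is filled round-robin: with, e.g.,
                 * 4 queues and HW_HASH_INDEX_SIZE slots, the entries read
                 * grp[0], grp[1], grp[2], grp[3], grp[0], ...
                 */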
1370
1371                 /* Configure RSS only if the queue count is > 1 */
1372                 if (vnic->rx_queue_cnt > 1) {
1373                         vnic->hash_type =
1374                                 bnxt_rte_to_hwrm_hash_types(rss->types);
1375                         vnic->hash_mode =
1376                         bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);
1377
1378                         if (!rss->key_len) {
1379                                 /* If hash key has not been specified,
1380                                  * use random hash key.
1381                                  */
1382                                 prandom_bytes(vnic->rss_hash_key,
1383                                               HW_HASH_KEY_SIZE);
1384                         } else {
1385                                 if (rss->key_len > HW_HASH_KEY_SIZE)
1386                                         memcpy(vnic->rss_hash_key,
1387                                                rss->key,
1388                                                HW_HASH_KEY_SIZE);
1389                                 else
1390                                         memcpy(vnic->rss_hash_key,
1391                                                rss->key,
1392                                                rss->key_len);
1393                         }
1394                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1395                 } else {
1396                         PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1397                 }
1398
1399 vnic_found:
1400                 filter->dst_id = vnic->fw_vnic_id;
1401                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1402                 if (filter1 == NULL) {
1403                         rte_flow_error_set(error,
1404                                            ENOSPC,
1405                                            RTE_FLOW_ERROR_TYPE_ACTION,
1406                                            act,
1407                                            "New filter not available");
1408                         rc = -rte_errno;
1409                         goto ret;
1410                 }
1411
1412                 PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1413                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1414                 break;
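        /*
         * Caller-side sketch (illustrative): an RSS action spreading a
         * matched flow across queues 1 and 2. attr.group picks the VNIC,
         * and a zero key_len makes the driver choose a random hash key,
         * as coded above.
         *
         *	uint16_t queues[] = { 1, 2 };
         *	struct rte_flow_action_rss rss_conf = {
         *		.types = ETH_RSS_IPV4,
         *		.queue_num = 2,
         *		.queue = queues,
         *	};
         *	struct rte_flow_action acts[] = {
         *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
         *		{ .type = RTE_FLOW_ACTION_TYPE_END },
         *	};
         */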
1415         case RTE_FLOW_ACTION_TYPE_MARK:
1416                 if (bp->mark_table == NULL) {
1417                         rte_flow_error_set(error,
1418                                            ENOMEM,
1419                                            RTE_FLOW_ERROR_TYPE_ACTION,
1420                                            act,
1421                                            "Mark table not allocated.");
1422                         rc = -rte_errno;
1423                         goto ret;
1424                 }
1425
1426                 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
1427                         PMD_DRV_LOG(DEBUG,
1428                                     "Disabling vector processing for mark\n");
1429                         bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts;
1430                         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1431                 }
1432
1433                 filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
1434                 filter->mark = ((const struct rte_flow_action_mark *)
1435                                 act->conf)->id;
1436                 PMD_DRV_LOG(DEBUG, "Marking the flow with id %d\n", filter->mark);
1437                 break;
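        /*
         * Hedged note: on the receive side, a packet that hits a marked
         * flow carries the id in its mbuf, roughly:
         *
         *	if (mbuf->ol_flags & PKT_RX_FDIR_ID)
         *		mark = mbuf->hash.fdir.hi;
         *
         * Vector RX is disabled above because only the scalar burst
         * routine consults bp->mark_table.
         */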
1438         default:
1439                 rte_flow_error_set(error,
1440                                    EINVAL,
1441                                    RTE_FLOW_ERROR_TYPE_ACTION,
1442                                    act,
1443                                    "Invalid action.");
1444                 rc = -rte_errno;
1445                 goto ret;
1446         }
1447
1448 done:
1449         act = bnxt_flow_non_void_action(++act);
1450         if (act->type != RTE_FLOW_ACTION_TYPE_END)
1451                 goto start;
1452
1453         return rc;
1454 ret:
1455
1456         if (filter1) {
1457                 bnxt_hwrm_clear_l2_filter(bp, filter1);
1458                 bnxt_free_filter(bp, filter1);
1459         }
1460
1461         if (rte_errno) {
1462                 if (vnic && STAILQ_EMPTY(&vnic->filter))
1463                         vnic->rx_queue_cnt = 0;
1464
1465                 if (vnic && rxq && !vnic->rx_queue_cnt)
1466                         rxq->vnic = &bp->vnic_info[0];
1467         }
1468         return -rte_errno;
1469 }
1470
1471 static struct bnxt_vnic_info *
1472 find_matching_vnic(struct bnxt *bp,
1473                    struct bnxt_filter_info *filter)
1474 {
1475         struct bnxt_vnic_info *vnic = NULL;
1476         unsigned int i;
1477
1478         for (i = 0; i < bp->max_vnics; i++) {
1479                 vnic = &bp->vnic_info[i];
1480                 if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1481                     filter->dst_id == vnic->fw_vnic_id) {
1482                         PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1483                                     vnic->ff_pool_idx);
1484                         return vnic;
1485                 }
1486         }
1487         return NULL;
1488 }
1489
1490 static int
1491 bnxt_flow_validate(struct rte_eth_dev *dev,
1492                    const struct rte_flow_attr *attr,
1493                    const struct rte_flow_item pattern[],
1494                    const struct rte_flow_action actions[],
1495                    struct rte_flow_error *error)
1496 {
1497         struct bnxt *bp = dev->data->dev_private;
1498         struct bnxt_vnic_info *vnic = NULL;
1499         struct bnxt_filter_info *filter;
1500         int ret = 0;
1501
1502         bnxt_acquire_flow_lock(bp);
1503         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1504         if (ret != 0) {
1505                 bnxt_release_flow_lock(bp);
1506                 return ret;
1507         }
1508
1509         filter = bnxt_get_unused_filter(bp);
1510         if (filter == NULL) {
1511                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1512                 bnxt_release_flow_lock(bp);
1513                 return -ENOMEM;
1514         }
1515
1516         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1517                                            error, filter);
1518         if (ret)
1519                 goto exit;
1520
1521         vnic = find_matching_vnic(bp, filter);
1522         if (vnic) {
1523                 if (STAILQ_EMPTY(&vnic->filter)) {
1524                         rte_free(vnic->fw_grp_ids);
1525                         bnxt_hwrm_vnic_ctx_free(bp, vnic);
1526                         bnxt_hwrm_vnic_free(bp, vnic);
1527                         vnic->rx_queue_cnt = 0;
1528                         PMD_DRV_LOG(DEBUG, "Free VNIC\n");
1529                 }
1530         }
1531
1532         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1533                 bnxt_hwrm_clear_em_filter(bp, filter);
1534         else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1535                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
1536         else
1537                 bnxt_hwrm_clear_l2_filter(bp, filter);
1538
1539 exit:
1540         /* No need to hold on to this filter if we are just validating flow */
1541         bnxt_free_filter(bp, filter);
1542         bnxt_release_flow_lock(bp);
1543
1544         return ret;
1545 }
1546
1547 static void
1548 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1549                    struct bnxt_filter_info *new_filter)
1550 {
1551         /* Clear the new L2 filter that was created in the previous step in
1552          * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1553          * filter which points to the new destination queue and so we clear
1554          * the previous L2 filter. For ntuple filters, we are going to reuse
1555          * the old L2 filter and create new NTUPLE filter with this new
1556          * destination queue subsequently during bnxt_flow_create. So we
1557          * decrement the ref cnt of the L2 filter that would've been bumped
1558          * up previously in bnxt_validate_and_parse_flow as the old n-tuple
1559          * filter that was referencing it will be deleted now.
1560          */
1561         bnxt_hwrm_clear_l2_filter(bp, old_filter);
1562         if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1563                 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1564         } else {
1565                 if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1566                         bnxt_hwrm_clear_em_filter(bp, old_filter);
1567                 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1568                         bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1569         }
1570 }
1571
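/* Walk every VNIC's flow list looking for a filter identical to nf.
 * Returns -EEXIST for an exact duplicate (same match and destination),
 * -EXDEV when only the destination differs (the existing flow is
 * retargeted to the new filter), and 0 when nothing matches.
 */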
1572 static int
1573 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1574 {
1575         struct bnxt_filter_info *mf;
1576         struct rte_flow *flow;
1577         int i;
1578
1579         for (i = bp->max_vnics - 1; i >= 0; i--) {
1580                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1581
1582                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1583                         continue;
1584
1585                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1586                         mf = flow->filter;
1587
1588                         if (mf->filter_type == nf->filter_type &&
1589                             mf->flags == nf->flags &&
1590                             mf->src_port == nf->src_port &&
1591                             mf->src_port_mask == nf->src_port_mask &&
1592                             mf->dst_port == nf->dst_port &&
1593                             mf->dst_port_mask == nf->dst_port_mask &&
1594                             mf->ip_protocol == nf->ip_protocol &&
1595                             mf->ip_addr_type == nf->ip_addr_type &&
1596                             mf->ethertype == nf->ethertype &&
1597                             mf->vni == nf->vni &&
1598                             mf->tunnel_type == nf->tunnel_type &&
1599                             mf->l2_ovlan == nf->l2_ovlan &&
1600                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1601                             mf->l2_ivlan == nf->l2_ivlan &&
1602                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1603                             !memcmp(mf->l2_addr, nf->l2_addr,
1604                                     RTE_ETHER_ADDR_LEN) &&
1605                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1606                                     RTE_ETHER_ADDR_LEN) &&
1607                             !memcmp(mf->src_macaddr, nf->src_macaddr,
1608                                     RTE_ETHER_ADDR_LEN) &&
1609                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1610                                     RTE_ETHER_ADDR_LEN) &&
1611                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1612                                     sizeof(nf->src_ipaddr)) &&
1613                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1614                                     sizeof(nf->src_ipaddr_mask)) &&
1615                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1616                                     sizeof(nf->dst_ipaddr)) &&
1617                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1618                                     sizeof(nf->dst_ipaddr_mask))) {
1619                                 if (mf->dst_id == nf->dst_id)
1620                                         return -EEXIST;
1621                                 /* Free the old filter, update flow
1622                                  * with new filter
1623                                  */
1624                                 bnxt_update_filter(bp, mf, nf);
1625                                 STAILQ_REMOVE(&vnic->filter, mf,
1626                                               bnxt_filter_info, next);
1627                                 STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
1628                                 bnxt_free_filter(bp, mf);
1629                                 flow->filter = nf;
1630                                 return -EXDEV;
1631                         }
1632                 }
1633         }
1634         return 0;
1635 }
1636
1637 static void
1638 bnxt_setup_flow_counter(struct bnxt *bp)
1639 {
1640         if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
1641             !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
1642                 rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
1643                                   bnxt_flow_cnt_alarm_cb,
1644                                   (void *)bp);
1645                 bp->flags |= BNXT_FLAG_FC_THREAD;
1646         }
1647 }
1648
1649 void bnxt_flow_cnt_alarm_cb(void *arg)
1650 {
1651         int rc = 0;
1652         struct bnxt *bp = arg;
1653
1654         if (!bp->flow_stat->rx_fc_out_tbl.va) {
1655                 PMD_DRV_LOG(ERR, "rx_fc_out_tbl.va is NULL, cancelling flow counters\n");
1656                 bnxt_cancel_fc_thread(bp);
1657                 return;
1658         }
1659
1660         if (!bp->flow_stat->flow_count) {
1661                 bnxt_cancel_fc_thread(bp);
1662                 return;
1663         }
1664
1665         if (!bp->eth_dev->data->dev_started) {
1666                 bnxt_cancel_fc_thread(bp);
1667                 return;
1668         }
1669
1670         rc = bnxt_flow_stats_req(bp);
1671         if (rc) {
1672                 PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");
1673                 return;
1674         }
1675
1676         rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
1677                           bnxt_flow_cnt_alarm_cb,
1678                           (void *)bp);
1679 }
1680
1681
1682 static struct rte_flow *
1683 bnxt_flow_create(struct rte_eth_dev *dev,
1684                  const struct rte_flow_attr *attr,
1685                  const struct rte_flow_item pattern[],
1686                  const struct rte_flow_action actions[],
1687                  struct rte_flow_error *error)
1688 {
1689         struct bnxt *bp = dev->data->dev_private;
1690         struct bnxt_vnic_info *vnic = NULL;
1691         struct bnxt_filter_info *filter;
1692         bool update_flow = false;
1693         struct rte_flow *flow;
1694         int ret = 0;
1695         uint32_t tun_type, flow_id;
1696
1697         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1698                 rte_flow_error_set(error, EINVAL,
1699                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1700                                    "Failed to create flow, not a trusted VF");
1701                 return NULL;
1702         }
1703
1704         if (!dev->data->dev_started) {
1705                 rte_flow_error_set(error,
1706                                    EINVAL,
1707                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1708                                    NULL,
1709                                    "Device must be started");
1710                 return NULL;
1711         }
1712
1713         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1714         if (!flow) {
1715                 rte_flow_error_set(error, ENOMEM,
1716                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1717                                    "Failed to allocate memory");
1718                 return flow;
1719         }
1720
1721         bnxt_acquire_flow_lock(bp);
1722         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1723         if (ret != 0) {
1724                 PMD_DRV_LOG(ERR, "Flow arguments are invalid.\n");
1725                 goto free_flow;
1726         }
1727
1728         filter = bnxt_get_unused_filter(bp);
1729         if (filter == NULL) {
1730                 rte_flow_error_set(error, ENOSPC,
1731                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1732                                    "Not enough resources for a new flow");
1733                 goto free_flow;
1734         }
1735
1736         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1737                                            error, filter);
1738         if (ret != 0)
1739                 goto free_filter;
1740
1741         ret = bnxt_match_filter(bp, filter);
1742         if (ret == -EEXIST) {
1743                 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1744                 /* Clear the filter that was created as part of
1745                  * validate_and_parse_flow() above
1746                  */
1747                 bnxt_hwrm_clear_l2_filter(bp, filter);
1748                 goto free_filter;
1749         } else if (ret == -EXDEV) {
1750                 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1751                 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1752                 update_flow = true;
1753         }
1754
1755         /* If tunnel redirection to a VF/PF is specified, then only tunnel_type
1756          * is set and enables is set to the tunnel type. Issue the HWRM cmd directly
1757          * in such a case.
1758          */
1759         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1760             filter->enables == filter->tunnel_type) {
1761                 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1762                 if (ret) {
1763                         rte_flow_error_set(error, -ret,
1764                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1765                                            "Unable to query tunnel to VF");
1766                         goto free_filter;
1767                 }
1768                 if (tun_type == (1U << filter->tunnel_type)) {
1769                         ret =
1770                                 bnxt_hwrm_tunnel_redirect_free(bp,
1771                                                 filter->tunnel_type);
1772                         if (ret) {
1773                                 PMD_DRV_LOG(ERR,
1774                                             "Unable to free existing tunnel\n");
1775                                 rte_flow_error_set(error, -ret,
1776                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1777                                                    NULL,
1778                                                    "Unable to free preexisting "
1779                                                    "tunnel on VF");
1780                                 goto free_filter;
1781                         }
1782                 }
1783                 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1784                 if (ret) {
1785                         rte_flow_error_set(error, -ret,
1786                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1787                                            "Unable to redirect tunnel to VF");
1788                         goto free_filter;
1789                 }
1790                 vnic = &bp->vnic_info[0];
1791                 goto done;
1792         }
1793
1794         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1795                 filter->enables |=
1796                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1797                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1798                 if (ret != 0) {
1799                         rte_flow_error_set(error, -ret,
1800                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1801                                            "Failed to create EM filter");
1802                         goto free_filter;
1803                 }
1804         }
1805
1806         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1807                 filter->enables |=
1808                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1809                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1810                 if (ret != 0) {
1811                         rte_flow_error_set(error, -ret,
1812                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1813                                            "Failed to create ntuple filter");
1814                         goto free_filter;
1815                 }
1816         }
1817
1818         if (BNXT_RFS_NEEDS_VNIC(bp))
1819                 vnic = find_matching_vnic(bp, filter);
1820         else
1821                 vnic = BNXT_GET_DEFAULT_VNIC(bp);
1822 done:
1823         if (!ret || update_flow) {
1824                 flow->filter = filter;
1825                 flow->vnic = vnic;
1826                 if (update_flow) {
1827                         ret = -EXDEV;
1828                         goto free_flow;
1829                 }
1830
1831                 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1832                         PMD_DRV_LOG(DEBUG,
1833                                     "Mark action: mark id 0x%x, flow id 0x%x\n",
1834                                     filter->mark, filter->flow_id);
1835
1836                         /* Flow IDs from TCAM and EM lookups are 16-bit
1837                          * wide; other modes are not supported.
1838                          */
1839                         flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1840                         if (bp->mark_table[flow_id].valid) {
1841                                 rte_flow_error_set(error, EEXIST,
1842                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1843                                                    NULL,
1844                                                    "Flow with mark id exists");
1845                                 bnxt_clear_one_vnic_filter(bp, filter);
1846                                 goto free_filter;
1847                         }
1848                         bp->mark_table[flow_id].valid = true;
1849                         bp->mark_table[flow_id].mark_id = filter->mark;
1850                 }
1851
1852                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1853                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1854
1855                 if (BNXT_FLOW_XSTATS_EN(bp))
1856                         bp->flow_stat->flow_count++;
1857                 bnxt_release_flow_lock(bp);
1858                 bnxt_setup_flow_counter(bp);
1859                 PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1860                 return flow;
1861         }
1862
1863 free_filter:
1864         bnxt_free_filter(bp, filter);
1865 free_flow:
1866         if (ret == -EEXIST)
1867                 rte_flow_error_set(error, -ret,
1868                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1869                                    "Matching Flow exists.");
1870         else if (ret == -EXDEV)
1871                 rte_flow_error_set(error, 0,
1872                                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
1873                                    "Flow with pattern exists, updating destination queue");
1874         else if (!rte_errno)
1875                 rte_flow_error_set(error, -ret,
1876                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1877                                    "Failed to create flow.");
1878         rte_free(flow);
1879         flow = NULL;
1880         bnxt_release_flow_lock(bp);
1881         return flow;
1882 }
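/*
 * Illustrative end-to-end caller sketch (not driver code), assuming a
 * started port and valid attr/pattern[]/actions[] arrays:
 *
 *	struct rte_flow_error err = { 0 };
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow create failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */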
1883
1884 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1885                                                struct bnxt_filter_info *filter,
1886                                                struct rte_flow_error *error)
1887 {
1888         uint16_t tun_dst_fid;
1889         uint32_t tun_type;
1890         int ret = 0;
1891
1892         ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1893         if (ret) {
1894                 rte_flow_error_set(error, -ret,
1895                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1896                                    "Unable to query tunnel to VF");
1897                 return ret;
1898         }
1899         if (tun_type == (1U << filter->tunnel_type)) {
1900                 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1901                                                      &tun_dst_fid);
1902                 if (ret) {
1903                         rte_flow_error_set(error, -ret,
1904                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1905                                            NULL,
1906                                            "tunnel_redirect info cmd fail");
1907                         return ret;
1908                 }
1909                 PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
1910                             tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1911
1912                 /* Tunnel doesn't belong to this VF, so don't send HWRM
1913                  * cmd, just delete the flow from driver
1914                  */
1915                 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1916                         PMD_DRV_LOG(ERR,
1917                                     "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1918                 else
1919                         ret = bnxt_hwrm_tunnel_redirect_free(bp,
1920                                                         filter->tunnel_type);
1921         }
1922         return ret;
1923 }
1924
1925 static int
1926 _bnxt_flow_destroy(struct bnxt *bp,
1927                    struct rte_flow *flow,
1928                    struct rte_flow_error *error)
1929 {
1930         struct bnxt_filter_info *filter;
1931         struct bnxt_vnic_info *vnic;
1932         int ret = 0;
1933         uint32_t flow_id;
1934
1935         filter = flow->filter;
1936         vnic = flow->vnic;
1937
1938         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1939             filter->enables == filter->tunnel_type) {
1940                 ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
1941                 if (!ret)
1942                         goto done;
1943                 else
1944                         return ret;
1945         }
1946
1947         ret = bnxt_match_filter(bp, filter);
1948         if (ret == 0)
1949                 PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1950
1951         if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1952                 flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1953                 memset(&bp->mark_table[flow_id], 0,
1954                        sizeof(bp->mark_table[flow_id]));
1955                 filter->flow_id = 0;
1956         }
1957
1958         ret = bnxt_clear_one_vnic_filter(bp, filter);
1959
1960 done:
1961         if (!ret) {
1962                 /* If it is a L2 drop filter, when the filter is created,
1963                  * the FW updates the BC/MC records.
1964                  * Once this filter is removed, issue the set_rx_mask command
1965                  * to reset the BC/MC records in the HW to the settings
1966                  * before the drop counter is created.
1967                  * before the drop filter was created.
1968                 if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
1969                         bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);
1970
1971                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
1972                 bnxt_free_filter(bp, filter);
1973                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1974                 rte_free(flow);
1975                 if (BNXT_FLOW_XSTATS_EN(bp))
1976                         bp->flow_stat->flow_count--;
1977
1978                 /* If this was the last flow associated with this vnic,
1979                  * switch the queue back to RSS pool.
1980                  */
1981                 if (vnic && !vnic->func_default &&
1982                     STAILQ_EMPTY(&vnic->flow_list)) {
1983                         rte_free(vnic->fw_grp_ids);
1984                         if (vnic->rx_queue_cnt > 1)
1985                                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1986
1987                         bnxt_hwrm_vnic_free(bp, vnic);
1988                         vnic->rx_queue_cnt = 0;
1989                 }
1990         } else {
1991                 rte_flow_error_set(error, -ret,
1992                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1993                                    "Failed to destroy flow.");
1994         }
1995
1996         return ret;
1997 }
1998
1999 static int
2000 bnxt_flow_destroy(struct rte_eth_dev *dev,
2001                   struct rte_flow *flow,
2002                   struct rte_flow_error *error)
2003 {
2004         struct bnxt *bp = dev->data->dev_private;
2005         int ret = 0;
2006
2007         bnxt_acquire_flow_lock(bp);
2008         if (!flow) {
2009                 rte_flow_error_set(error, EINVAL,
2010                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2011                                    "Invalid flow: failed to destroy flow.");
2012                 bnxt_release_flow_lock(bp);
2013                 return -EINVAL;
2014         }
2015
2016         if (!flow->filter) {
2017                 rte_flow_error_set(error, EINVAL,
2018                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2019                                    "Invalid flow: failed to destroy flow.");
2020                 bnxt_release_flow_lock(bp);
2021                 return -EINVAL;
2022         }
2023         ret = _bnxt_flow_destroy(bp, flow, error);
2024         bnxt_release_flow_lock(bp);
2025
2026         return ret;
2027 }
2028
2029 void bnxt_cancel_fc_thread(struct bnxt *bp)
2030 {
2031         bp->flags &= ~BNXT_FLAG_FC_THREAD;
2032         rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
2033 }
2034
2035 static int
2036 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2037 {
2038         struct bnxt *bp = dev->data->dev_private;
2039         struct bnxt_vnic_info *vnic;
2040         struct rte_flow *flow;
2041         unsigned int i;
2042         int ret = 0;
2043
2044         bnxt_acquire_flow_lock(bp);
2045         for (i = 0; i < bp->max_vnics; i++) {
2046                 vnic = &bp->vnic_info[i];
2047                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
2048                         continue;
2049
2050                 while (!STAILQ_EMPTY(&vnic->flow_list)) {
2051                         flow = STAILQ_FIRST(&vnic->flow_list);
2052
2053                         if (!flow->filter)
2054                                 break; /* don't spin forever on a bad flow */
2055
2056                         ret = _bnxt_flow_destroy(bp, flow, error);
2057                         if (ret)
2058                                 break;
2059                 }
2060         }
2061
2062         bnxt_cancel_fc_thread(bp);
2063         bnxt_release_flow_lock(bp);
2064
2065         return ret;
2066 }
2067
2068 const struct rte_flow_ops bnxt_flow_ops = {
2069         .validate = bnxt_flow_validate,
2070         .create = bnxt_flow_create,
2071         .destroy = bnxt_flow_destroy,
2072         .flush = bnxt_flow_flush,
2073 };
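/*
 * Hedged note: applications never call these handlers directly; the
 * generic rte_flow entry points dispatch through this table, e.g.
 * rte_flow_flush(port_id, &err) resolves to bnxt_flow_flush() above.
 */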